xref: /openbmc/linux/fs/notify/inotify/inotify_user.c (revision 867e6d38)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * fs/inotify_user.c - inotify support for userspace
4  *
5  * Authors:
6  *	John McCutchan	<ttb@tentacle.dhs.org>
7  *	Robert Love	<rml@novell.com>
8  *
9  * Copyright (C) 2005 John McCutchan
10  * Copyright 2006 Hewlett-Packard Development Company, L.P.
11  *
12  * Copyright (C) 2009 Eric Paris <Red Hat Inc>
13  * inotify was largely rewritten to make use of the fsnotify infrastructure
14  */
15 
16 #include <linux/file.h>
17 #include <linux/fs.h> /* struct inode */
18 #include <linux/fsnotify_backend.h>
19 #include <linux/idr.h>
20 #include <linux/init.h> /* fs_initcall */
21 #include <linux/inotify.h>
22 #include <linux/kernel.h> /* roundup() */
23 #include <linux/namei.h> /* LOOKUP_FOLLOW */
24 #include <linux/sched/signal.h>
25 #include <linux/slab.h> /* struct kmem_cache */
26 #include <linux/syscalls.h>
27 #include <linux/types.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/uaccess.h>
30 #include <linux/poll.h>
31 #include <linux/wait.h>
32 #include <linux/memcontrol.h>
33 #include <linux/security.h>
34 
35 #include "inotify.h"
36 #include "../fdinfo.h"
37 
38 #include <asm/ioctls.h>
39 
40 /*
41  * An inotify watch requires allocating an inotify_inode_mark structure as
42  * well as pinning the watched inode.  Doubling the size of a VFS inode
43  * should be more than enough to cover the extra size that a concrete
44  * filesystem's inode carries on top of the bare VFS inode.
45  */
46 #define INOTIFY_WATCH_COST	(sizeof(struct inotify_inode_mark) + \
47 				 2 * sizeof(struct inode))
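
/*
 * Illustrative only (structure sizes vary by architecture and config):
 * with a struct inode of roughly 600 bytes and an inotify_inode_mark of
 * roughly 100 bytes, one watch is charged on the order of 1300 bytes
 * against the per-user limit computed in inotify_user_setup() below.
 */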
48 
49 /* configurable via /proc/sys/fs/inotify/ */
50 static int inotify_max_queued_events __read_mostly;
51 
52 struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
53 
54 #ifdef CONFIG_SYSCTL
55 
56 #include <linux/sysctl.h>
57 
58 struct ctl_table inotify_table[] = {
59 	{
60 		.procname	= "max_user_instances",
61 		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
62 		.maxlen		= sizeof(int),
63 		.mode		= 0644,
64 		.proc_handler	= proc_dointvec_minmax,
65 		.extra1		= SYSCTL_ZERO,
66 	},
67 	{
68 		.procname	= "max_user_watches",
69 		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
70 		.maxlen		= sizeof(int),
71 		.mode		= 0644,
72 		.proc_handler	= proc_dointvec_minmax,
73 		.extra1		= SYSCTL_ZERO,
74 	},
75 	{
76 		.procname	= "max_queued_events",
77 		.data		= &inotify_max_queued_events,
78 		.maxlen		= sizeof(int),
79 		.mode		= 0644,
80 		.proc_handler	= proc_dointvec_minmax,
81 		.extra1		= SYSCTL_ZERO
82 	},
83 	{ }
84 };
85 #endif /* CONFIG_SYSCTL */
86 
87 static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
88 {
89 	__u32 mask;
90 
91 	/*
92 	 * Every watch should receive its own IN_IGNORED and unmount events.
93 	 * All directories also care about events on their children.
94 	 */
95 	mask = (FS_IN_IGNORED | FS_UNMOUNT);
96 	if (S_ISDIR(inode->i_mode))
97 		mask |= FS_EVENT_ON_CHILD;
98 
99 	/* mask off the flags used to open the fd */
100 	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
101 
102 	return mask;
103 }
104 
105 static inline u32 inotify_mask_to_arg(__u32 mask)
106 {
107 	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
108 		       IN_Q_OVERFLOW);
109 }
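
/*
 * Round-trip sketch (illustrative): a watch added on a directory with
 * arg = IN_CREATE | IN_ONESHOT becomes the internal mask
 * FS_CREATE | FS_IN_ONESHOT | FS_EVENT_ON_CHILD | FS_IN_IGNORED | FS_UNMOUNT,
 * while inotify_mask_to_arg() keeps only the bits userspace knows about
 * (IN_ALL_EVENTS, IN_ISDIR, IN_UNMOUNT, IN_IGNORED, IN_Q_OVERFLOW) when an
 * event mask is reported back.
 */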
110 
111 /* inotify userspace file descriptor functions */
112 static __poll_t inotify_poll(struct file *file, poll_table *wait)
113 {
114 	struct fsnotify_group *group = file->private_data;
115 	__poll_t ret = 0;
116 
117 	poll_wait(file, &group->notification_waitq, wait);
118 	spin_lock(&group->notification_lock);
119 	if (!fsnotify_notify_queue_is_empty(group))
120 		ret = EPOLLIN | EPOLLRDNORM;
121 	spin_unlock(&group->notification_lock);
122 
123 	return ret;
124 }
125 
126 static int round_event_name_len(struct fsnotify_event *fsn_event)
127 {
128 	struct inotify_event_info *event;
129 
130 	event = INOTIFY_E(fsn_event);
131 	if (!event->name_len)
132 		return 0;
133 	return roundup(event->name_len + 1, sizeof(struct inotify_event));
134 }
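
/*
 * Worked example (assuming the usual ABI where sizeof(struct inotify_event)
 * is 16 bytes): a 3-character name needs 3 + 1 = 4 bytes including the
 * terminating '\0', which rounds up to 16, so the whole event occupies
 * 16 + 16 = 32 bytes of the read buffer.
 */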
135 
136 /*
137  * Get one queued event if one exists and is small enough to fit in
138  * "count".  Return an error pointer if the user buffer is not large
139  * enough to hold the next event.
140  *
141  * Called with the group->notification_lock held.
142  */
143 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
144 					    size_t count)
145 {
146 	size_t event_size = sizeof(struct inotify_event);
147 	struct fsnotify_event *event;
148 
149 	event = fsnotify_peek_first_event(group);
150 	if (!event)
151 		return NULL;
152 
153 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
154 
155 	event_size += round_event_name_len(event);
156 	if (event_size > count)
157 		return ERR_PTR(-EINVAL);
158 
159 	/* held the notification_lock the whole time, so this is the
160 	 * same event we peeked above */
161 	fsnotify_remove_first_event(group);
162 
163 	return event;
164 }
165 
166 /*
167  * Copy an event to user space, returning how much we copied.
168  *
169  * We already checked that the event size is smaller than the
170  * buffer we had in "get_one_event()" above.
171  */
172 static ssize_t copy_event_to_user(struct fsnotify_group *group,
173 				  struct fsnotify_event *fsn_event,
174 				  char __user *buf)
175 {
176 	struct inotify_event inotify_event;
177 	struct inotify_event_info *event;
178 	size_t event_size = sizeof(struct inotify_event);
179 	size_t name_len;
180 	size_t pad_name_len;
181 
182 	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);
183 
184 	event = INOTIFY_E(fsn_event);
185 	name_len = event->name_len;
186 	/*
187 	 * round up the name length (plus an extra byte for the terminating
188 	 * '\0') so that the padded name is a multiple of event_size.
189 	 */
190 	pad_name_len = round_event_name_len(fsn_event);
191 	inotify_event.len = pad_name_len;
192 	inotify_event.mask = inotify_mask_to_arg(event->mask);
193 	inotify_event.wd = event->wd;
194 	inotify_event.cookie = event->sync_cookie;
195 
196 	/* send the main event */
197 	if (copy_to_user(buf, &inotify_event, event_size))
198 		return -EFAULT;
199 
200 	buf += event_size;
201 
202 	/*
203 	 * fsnotify only stores the file name, so here we have to send the name
204 	 * and then pad it out to a multiple of sizeof(struct inotify_event)
205 	 * with zeros.
206 	 */
207 	if (pad_name_len) {
208 		/* copy the path name */
209 		if (copy_to_user(buf, event->name, name_len))
210 			return -EFAULT;
211 		buf += name_len;
212 
213 		/* fill userspace with 0's */
214 		if (clear_user(buf, pad_name_len - name_len))
215 			return -EFAULT;
216 		event_size += pad_name_len;
217 	}
218 
219 	return event_size;
220 }
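
/*
 * Resulting layout in the user buffer for one event (illustrative):
 *
 *	+----------------------+------------------------------------+
 *	| struct inotify_event | name, '\0', zero padding           |
 *	|     (event_size)     | (inotify_event.len bytes, may be 0)|
 *	+----------------------+------------------------------------+
 */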
221 
222 static ssize_t inotify_read(struct file *file, char __user *buf,
223 			    size_t count, loff_t *pos)
224 {
225 	struct fsnotify_group *group;
226 	struct fsnotify_event *kevent;
227 	char __user *start;
228 	int ret;
229 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
230 
231 	start = buf;
232 	group = file->private_data;
233 
234 	add_wait_queue(&group->notification_waitq, &wait);
235 	while (1) {
236 		spin_lock(&group->notification_lock);
237 		kevent = get_one_event(group, count);
238 		spin_unlock(&group->notification_lock);
239 
240 		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
241 
242 		if (kevent) {
243 			ret = PTR_ERR(kevent);
244 			if (IS_ERR(kevent))
245 				break;
246 			ret = copy_event_to_user(group, kevent, buf);
247 			fsnotify_destroy_event(group, kevent);
248 			if (ret < 0)
249 				break;
250 			buf += ret;
251 			count -= ret;
252 			continue;
253 		}
254 
255 		ret = -EAGAIN;
256 		if (file->f_flags & O_NONBLOCK)
257 			break;
258 		ret = -ERESTARTSYS;
259 		if (signal_pending(current))
260 			break;
261 
262 		if (start != buf)
263 			break;
264 
265 		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
266 	}
267 	remove_wait_queue(&group->notification_waitq, &wait);
268 
269 	if (start != buf && ret != -EFAULT)
270 		ret = buf - start;
271 	return ret;
272 }
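
/*
 * Userspace consumption sketch (illustrative, not part of this file;
 * inotify_fd and handle() are placeholders): read() returns one or more
 * packed events, each a struct inotify_event immediately followed by
 * 'len' bytes of padded name.
 *
 *	char buf[4096];
 *	const struct inotify_event *event;
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p;
 *
 *	for (p = buf; p < buf + len;
 *	     p += sizeof(struct inotify_event) + event->len) {
 *		event = (const struct inotify_event *)p;
 *		handle(event->wd, event->mask, event->len ? event->name : NULL);
 *	}
 *
 * A buffer too small to hold even the next pending event makes read()
 * fail with EINVAL, which is why get_one_event() returns ERR_PTR(-EINVAL).
 */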
273 
274 static int inotify_release(struct inode *ignored, struct file *file)
275 {
276 	struct fsnotify_group *group = file->private_data;
277 
278 	pr_debug("%s: group=%p\n", __func__, group);
279 
280 	/* free this group, matching get was inotify_new_group->fsnotify_alloc_user_group */
281 	fsnotify_destroy_group(group);
282 
283 	return 0;
284 }
285 
286 static long inotify_ioctl(struct file *file, unsigned int cmd,
287 			  unsigned long arg)
288 {
289 	struct fsnotify_group *group;
290 	struct fsnotify_event *fsn_event;
291 	void __user *p;
292 	int ret = -ENOTTY;
293 	size_t send_len = 0;
294 
295 	group = file->private_data;
296 	p = (void __user *) arg;
297 
298 	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
299 
300 	switch (cmd) {
301 	case FIONREAD:
302 		spin_lock(&group->notification_lock);
303 		list_for_each_entry(fsn_event, &group->notification_list,
304 				    list) {
305 			send_len += sizeof(struct inotify_event);
306 			send_len += round_event_name_len(fsn_event);
307 		}
308 		spin_unlock(&group->notification_lock);
309 		ret = put_user(send_len, (int __user *) p);
310 		break;
311 #ifdef CONFIG_CHECKPOINT_RESTORE
312 	case INOTIFY_IOC_SETNEXTWD:
313 		ret = -EINVAL;
314 		if (arg >= 1 && arg <= INT_MAX) {
315 			struct inotify_group_private_data *data;
316 
317 			data = &group->inotify_data;
318 			spin_lock(&data->idr_lock);
319 			idr_set_cursor(&data->idr, (unsigned int)arg);
320 			spin_unlock(&data->idr_lock);
321 			ret = 0;
322 		}
323 		break;
324 #endif /* CONFIG_CHECKPOINT_RESTORE */
325 	}
326 
327 	return ret;
328 }
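
/*
 * Illustrative userspace use of the FIONREAD command above, e.g. to size
 * the buffer passed to the next read():
 *
 *	int avail;
 *
 *	ioctl(inotify_fd, FIONREAD, &avail);
 */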
329 
330 static const struct file_operations inotify_fops = {
331 	.show_fdinfo	= inotify_show_fdinfo,
332 	.poll		= inotify_poll,
333 	.read		= inotify_read,
334 	.fasync		= fsnotify_fasync,
335 	.release	= inotify_release,
336 	.unlocked_ioctl	= inotify_ioctl,
337 	.compat_ioctl	= inotify_ioctl,
338 	.llseek		= noop_llseek,
339 };
340 
341 
342 /*
343  * inotify_find_inode - resolve a user-given path to a specific inode
344  */
345 static int inotify_find_inode(const char __user *dirname, struct path *path,
346 						unsigned int flags, __u64 mask)
347 {
348 	int error;
349 
350 	error = user_path_at(AT_FDCWD, dirname, flags, path);
351 	if (error)
352 		return error;
353 	/* you can only watch an inode if you have read permissions on it */
354 	error = path_permission(path, MAY_READ);
355 	if (error) {
356 		path_put(path);
357 		return error;
358 	}
359 	error = security_path_notify(path, mask,
360 				FSNOTIFY_OBJ_TYPE_INODE);
361 	if (error)
362 		path_put(path);
363 
364 	return error;
365 }
366 
367 static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
368 			      struct inotify_inode_mark *i_mark)
369 {
370 	int ret;
371 
372 	idr_preload(GFP_KERNEL);
373 	spin_lock(idr_lock);
374 
375 	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
376 	if (ret >= 0) {
377 		/* we added the mark to the idr, take a reference */
378 		i_mark->wd = ret;
379 		fsnotify_get_mark(&i_mark->fsn_mark);
380 	}
381 
382 	spin_unlock(idr_lock);
383 	idr_preload_end();
384 	return ret < 0 ? ret : 0;
385 }
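
/*
 * Note: idr_alloc_cyclic() hands out watch descriptors starting at 1 and
 * keeps advancing, so a recently removed wd is not immediately reused.
 * INOTIFY_IOC_SETNEXTWD in inotify_ioctl() can reposition the cursor,
 * which checkpoint/restore relies on when recreating watches.
 */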
386 
387 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
388 								int wd)
389 {
390 	struct idr *idr = &group->inotify_data.idr;
391 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
392 	struct inotify_inode_mark *i_mark;
393 
394 	assert_spin_locked(idr_lock);
395 
396 	i_mark = idr_find(idr, wd);
397 	if (i_mark) {
398 		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
399 
400 		fsnotify_get_mark(fsn_mark);
401 		/* One ref for being in the idr, one ref we just took */
402 		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
403 	}
404 
405 	return i_mark;
406 }
407 
408 static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
409 							 int wd)
410 {
411 	struct inotify_inode_mark *i_mark;
412 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
413 
414 	spin_lock(idr_lock);
415 	i_mark = inotify_idr_find_locked(group, wd);
416 	spin_unlock(idr_lock);
417 
418 	return i_mark;
419 }
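
/*
 * Both lookup helpers above return the mark with an extra reference held;
 * callers are expected to drop it with fsnotify_put_mark() once they are
 * done with the mark.
 */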
420 
421 /*
422  * Remove the mark from the idr (if present) and drop the reference
423  * on the mark because it was in the idr.
424  */
425 static void inotify_remove_from_idr(struct fsnotify_group *group,
426 				    struct inotify_inode_mark *i_mark)
427 {
428 	struct idr *idr = &group->inotify_data.idr;
429 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
430 	struct inotify_inode_mark *found_i_mark = NULL;
431 	int wd;
432 
433 	spin_lock(idr_lock);
434 	wd = i_mark->wd;
435 
436 	/*
437 	 * Does this i_mark think it is in the idr?  We shouldn't get called
438 	 * if it isn't.
439 	 */
440 	if (wd == -1) {
441 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
442 			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
443 		goto out;
444 	}
445 
446 	/* Let's look in the idr to see if we can find it */
447 	found_i_mark = inotify_idr_find_locked(group, wd);
448 	if (unlikely(!found_i_mark)) {
449 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
450 			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
451 		goto out;
452 	}
453 
454 	/*
455 	 * We found a mark in the idr at the right wd, but it's
456 	 * not the mark we were told to remove: the idr bookkeeping
457 	 * went seriously wrong somewhere.
458 	 */
459 	if (unlikely(found_i_mark != i_mark)) {
460 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
461 			"found_i_mark=%p found_i_mark->wd=%d "
462 			"found_i_mark->group=%p\n", __func__, i_mark,
463 			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
464 			found_i_mark->wd, found_i_mark->fsn_mark.group);
465 		goto out;
466 	}
467 
468 	/*
469 	 * One ref for being in the idr,
470 	 * one ref grabbed by inotify_idr_find_locked() above.
471 	 */
472 	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
473 		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
474 			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
475 		/* we can't really recover with bad ref counting... */
476 		BUG();
477 	}
478 
479 	idr_remove(idr, wd);
480 	/* Removed from the idr, drop that ref. */
481 	fsnotify_put_mark(&i_mark->fsn_mark);
482 out:
483 	i_mark->wd = -1;
484 	spin_unlock(idr_lock);
485 	/* match the ref taken by inotify_idr_find_locked() */
486 	if (found_i_mark)
487 		fsnotify_put_mark(&found_i_mark->fsn_mark);
488 }
489 
490 /*
491  * Send IN_IGNORED for this wd, remove this wd from the idr.
492  */
493 void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
494 				    struct fsnotify_group *group)
495 {
496 	struct inotify_inode_mark *i_mark;
497 
498 	/* Queue ignore event for the watch */
499 	inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL,
500 				   0);
501 
502 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
503 	/* remove this mark from the idr */
504 	inotify_remove_from_idr(group, i_mark);
505 
506 	dec_inotify_watches(group->inotify_data.ucounts);
507 }
508 
509 static int inotify_update_existing_watch(struct fsnotify_group *group,
510 					 struct inode *inode,
511 					 u32 arg)
512 {
513 	struct fsnotify_mark *fsn_mark;
514 	struct inotify_inode_mark *i_mark;
515 	__u32 old_mask, new_mask;
516 	__u32 mask;
517 	int add = (arg & IN_MASK_ADD);
518 	int create = (arg & IN_MASK_CREATE);
519 	int ret;
520 
521 	mask = inotify_arg_to_mask(inode, arg);
522 
523 	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
524 	if (!fsn_mark)
525 		return -ENOENT;
526 	else if (create) {
527 		ret = -EEXIST;
528 		goto out;
529 	}
530 
531 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
532 
533 	spin_lock(&fsn_mark->lock);
534 	old_mask = fsn_mark->mask;
535 	if (add)
536 		fsn_mark->mask |= mask;
537 	else
538 		fsn_mark->mask = mask;
539 	new_mask = fsn_mark->mask;
540 	spin_unlock(&fsn_mark->lock);
541 
542 	if (old_mask != new_mask) {
543 		/* more bits in old than in new? */
544 		int dropped = (old_mask & ~new_mask);
545 		/* more bits in this fsn_mark than the inode's mask? */
546 		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
547 
548 		/* update the inode with this new fsn_mark */
549 		if (dropped || do_inode)
550 			fsnotify_recalc_mask(inode->i_fsnotify_marks);
551 
552 	}
553 
554 	/* return the wd */
555 	ret = i_mark->wd;
556 
557 out:
558 	/* match the get from fsnotify_find_mark() */
559 	fsnotify_put_mark(fsn_mark);
560 
561 	return ret;
562 }
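
/*
 * Illustrative semantics of the flags handled above: given an existing
 * watch for IN_CREATE, a second inotify_add_watch() with IN_DELETE alone
 * replaces the mask, IN_DELETE | IN_MASK_ADD extends it to
 * IN_CREATE | IN_DELETE, and IN_DELETE | IN_MASK_CREATE fails with EEXIST
 * because a watch already exists.
 */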
563 
564 static int inotify_new_watch(struct fsnotify_group *group,
565 			     struct inode *inode,
566 			     u32 arg)
567 {
568 	struct inotify_inode_mark *tmp_i_mark;
569 	__u32 mask;
570 	int ret;
571 	struct idr *idr = &group->inotify_data.idr;
572 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
573 
574 	mask = inotify_arg_to_mask(inode, arg);
575 
576 	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
577 	if (unlikely(!tmp_i_mark))
578 		return -ENOMEM;
579 
580 	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
581 	tmp_i_mark->fsn_mark.mask = mask;
582 	tmp_i_mark->wd = -1;
583 
584 	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
585 	if (ret)
586 		goto out_err;
587 
588 	/* increment the number of watches the user has */
589 	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
590 		inotify_remove_from_idr(group, tmp_i_mark);
591 		ret = -ENOSPC;
592 		goto out_err;
593 	}
594 
595 	/* we are on the idr, now get on the inode */
596 	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
597 	if (ret) {
598 		/* we failed to get on the inode, get off the idr */
599 		inotify_remove_from_idr(group, tmp_i_mark);
600 		goto out_err;
601 	}
602 
603 
604 	/* return the watch descriptor for this new mark */
605 	ret = tmp_i_mark->wd;
606 
607 out_err:
608 	/* match the ref from fsnotify_init_mark() */
609 	fsnotify_put_mark(&tmp_i_mark->fsn_mark);
610 
611 	return ret;
612 }
613 
614 static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
615 {
616 	int ret = 0;
617 
618 	mutex_lock(&group->mark_mutex);
619 	/* try to update an existing watch with the new arg */
620 	ret = inotify_update_existing_watch(group, inode, arg);
621 	/* no mark present, try to add a new one */
622 	if (ret == -ENOENT)
623 		ret = inotify_new_watch(group, inode, arg);
624 	mutex_unlock(&group->mark_mutex);
625 
626 	return ret;
627 }
628 
629 static struct fsnotify_group *inotify_new_group(unsigned int max_events)
630 {
631 	struct fsnotify_group *group;
632 	struct inotify_event_info *oevent;
633 
634 	group = fsnotify_alloc_user_group(&inotify_fsnotify_ops);
635 	if (IS_ERR(group))
636 		return group;
637 
638 	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT);
639 	if (unlikely(!oevent)) {
640 		fsnotify_destroy_group(group);
641 		return ERR_PTR(-ENOMEM);
642 	}
643 	group->overflow_event = &oevent->fse;
644 	fsnotify_init_event(group->overflow_event);
645 	oevent->mask = FS_Q_OVERFLOW;
646 	oevent->wd = -1;
647 	oevent->sync_cookie = 0;
648 	oevent->name_len = 0;
649 
650 	group->max_events = max_events;
651 	group->memcg = get_mem_cgroup_from_mm(current->mm);
652 
653 	spin_lock_init(&group->inotify_data.idr_lock);
654 	idr_init(&group->inotify_data.idr);
655 	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
656 						 current_euid(),
657 						 UCOUNT_INOTIFY_INSTANCES);
658 
659 	if (!group->inotify_data.ucounts) {
660 		fsnotify_destroy_group(group);
661 		return ERR_PTR(-EMFILE);
662 	}
663 
664 	return group;
665 }
666 
667 
668 /* inotify syscalls */
669 static int do_inotify_init(int flags)
670 {
671 	struct fsnotify_group *group;
672 	int ret;
673 
674 	/* Check the IN_* constants for consistency.  */
675 	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
676 	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
677 
678 	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
679 		return -EINVAL;
680 
681 	/* fsnotify_alloc_user_group() took a reference to the group; we drop it when the file is released */
682 	group = inotify_new_group(inotify_max_queued_events);
683 	if (IS_ERR(group))
684 		return PTR_ERR(group);
685 
686 	ret = anon_inode_getfd("inotify", &inotify_fops, group,
687 				  O_RDONLY | flags);
688 	if (ret < 0)
689 		fsnotify_destroy_group(group);
690 
691 	return ret;
692 }
693 
694 SYSCALL_DEFINE1(inotify_init1, int, flags)
695 {
696 	return do_inotify_init(flags);
697 }
698 
699 SYSCALL_DEFINE0(inotify_init)
700 {
701 	return do_inotify_init(0);
702 }
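
/*
 * Typical userspace setup (illustrative):
 *
 *	int inotify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *
 *	if (inotify_fd < 0)
 *		perror("inotify_init1");
 *
 * The returned descriptor refers to the anonymous inode installed by
 * do_inotify_init() above.
 */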
703 
704 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
705 		u32, mask)
706 {
707 	struct fsnotify_group *group;
708 	struct inode *inode;
709 	struct path path;
710 	struct fd f;
711 	int ret;
712 	unsigned flags = 0;
713 
714 	/*
715 	 * We share a lot of code with fs/dnotify.  We also share
716 	 * the bit layout between inotify's IN_* and the fsnotify
717 	 * FS_*.  This check ensures that only the inotify IN_*
718 	 * bits get passed in and set in watches/events.
719 	 */
720 	if (unlikely(mask & ~ALL_INOTIFY_BITS))
721 		return -EINVAL;
722 	/*
723 	 * Require at least one valid bit set in the mask.
724 	 * Without _something_ set, we would have no events to
725 	 * watch for.
726 	 */
727 	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
728 		return -EINVAL;
729 
730 	f = fdget(fd);
731 	if (unlikely(!f.file))
732 		return -EBADF;
733 
734 	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
735 	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
736 		ret = -EINVAL;
737 		goto fput_and_out;
738 	}
739 
740 	/* verify that this is indeed an inotify instance */
741 	if (unlikely(f.file->f_op != &inotify_fops)) {
742 		ret = -EINVAL;
743 		goto fput_and_out;
744 	}
745 
746 	if (!(mask & IN_DONT_FOLLOW))
747 		flags |= LOOKUP_FOLLOW;
748 	if (mask & IN_ONLYDIR)
749 		flags |= LOOKUP_DIRECTORY;
750 
751 	ret = inotify_find_inode(pathname, &path, flags,
752 			(mask & IN_ALL_EVENTS));
753 	if (ret)
754 		goto fput_and_out;
755 
756 	/* inode held in place by reference to path; group by fget on fd */
757 	inode = path.dentry->d_inode;
758 	group = f.file->private_data;
759 
760 	/* create/update an inode mark */
761 	ret = inotify_update_watch(group, inode, mask);
762 	path_put(&path);
763 fput_and_out:
764 	fdput(f);
765 	return ret;
766 }
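
/*
 * Illustrative use: watch a directory ("/tmp" is only an example path)
 * for new and removed entries without following a trailing symlink:
 *
 *	int wd = inotify_add_watch(inotify_fd, "/tmp",
 *				   IN_CREATE | IN_DELETE | IN_DONT_FOLLOW);
 *
 * The returned wd is the value assigned to inotify_inode_mark->wd by
 * inotify_add_to_idr().
 */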
767 
768 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
769 {
770 	struct fsnotify_group *group;
771 	struct inotify_inode_mark *i_mark;
772 	struct fd f;
773 	int ret = -EINVAL;
774 
775 	f = fdget(fd);
776 	if (unlikely(!f.file))
777 		return -EBADF;
778 
779 	/* verify that this is indeed an inotify instance */
780 	if (unlikely(f.file->f_op != &inotify_fops))
781 		goto out;
782 
783 	group = f.file->private_data;
784 
785 	i_mark = inotify_idr_find(group, wd);
786 	if (unlikely(!i_mark))
787 		goto out;
788 
789 	ret = 0;
790 
791 	fsnotify_destroy_mark(&i_mark->fsn_mark, group);
792 
793 	/* match ref taken by inotify_idr_find */
794 	fsnotify_put_mark(&i_mark->fsn_mark);
795 
796 out:
797 	fdput(f);
798 	return ret;
799 }
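
/*
 * Illustrative use: inotify_rm_watch(inotify_fd, wd) destroys the mark;
 * the fsnotify core then calls inotify_ignored_and_remove_idr() above,
 * which queues a final IN_IGNORED event for that wd before the watch
 * disappears.
 */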
800 
801 /*
802  * inotify_user_setup - Our initialization function.  Note that we cannot return
803  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
804  * must result in panic().
805  */
806 static int __init inotify_user_setup(void)
807 {
808 	unsigned long watches_max;
809 	struct sysinfo si;
810 
811 	si_meminfo(&si);
812 	/*
813 	 * Allow up to 1% of addressable memory to be allocated for inotify
814 	 * watches (per user) limited to the range [8192, 1048576].
815 	 */
816 	watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
817 			INOTIFY_WATCH_COST;
818 	watches_max = clamp(watches_max, 8192UL, 1048576UL);
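	/*
	 * Worked example (illustrative, assuming 4 GiB of lowmem and an
	 * INOTIFY_WATCH_COST of ~1300 bytes): 1% of 4 GiB is roughly 43 MB,
	 * allowing about 33000 watches per user, comfortably inside the
	 * [8192, 1048576] clamp applied above.
	 */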
819 
820 	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
821 	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
822 	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
823 	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
824 	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
825 	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
826 	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
827 	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
828 	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
829 	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
830 	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
831 	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
832 	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
833 	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
834 	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
835 	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
836 	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
837 	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
838 
839 	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);
840 
841 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
842 					       SLAB_PANIC|SLAB_ACCOUNT);
843 
844 	inotify_max_queued_events = 16384;
845 	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
846 	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;
847 
848 	return 0;
849 }
850 fs_initcall(inotify_user_setup);
851