/* xref: /openbmc/linux/fs/notify/inotify/inotify_user.c (revision 8ffdff6a) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/*
 * An inotify watch requires allocating an inotify_inode_mark structure as
 * well as pinning the watched inode. Doubling the size of a VFS inode
 * should be more than enough to cover the additional filesystem inode
 * size increase.
 */
#define INOTIFY_WATCH_COST	(sizeof(struct inotify_inode_mark) + \
				 2 * sizeof(struct inode))

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
{
	__u32 mask;

	/*
	 * Every mark should accept its own IN_IGNORED and should receive
	 * events when the inode is unmounted.  All directories care about
	 * events on their children.
	 */
	mask = (FS_IN_IGNORED | FS_UNMOUNT);
	if (S_ISDIR(inode->i_mode))
		mask |= FS_EVENT_ON_CHILD;

	/* keep only the event bits and mark flags requested in arg */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
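
/*
 * Reader's note (summarising the two helpers above): only the
 * IN_ALL_EVENTS bits plus IN_ISDIR, IN_UNMOUNT, IN_IGNORED and
 * IN_Q_OVERFLOW are ever reported back to userspace.  Request-only
 * flags such as IN_ONESHOT and IN_EXCL_UNLINK shape the kernel-side
 * mark but are stripped from the events a reader sees.
 */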

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
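
/*
 * Usage sketch (userspace, assuming the usual <sys/inotify.h> and <poll.h>
 * APIs and that inotify_fd came from inotify_init1(); illustration only,
 * not part of this file's build):
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// at least one event is queued, read() won't block
 *
 * The fd can equally be added to an epoll or select() set; it becomes
 * readable whenever the notification queue is non-empty.
 */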

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
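
/*
 * Worked example (illustrative; struct inotify_event is 16 bytes on common
 * configurations): a name of "ab.txt" has name_len == 6, so we return
 * roundup(6 + 1, 16) == 16.  The extra byte covers the terminating '\0'
 * and the remainder is zero padding so consecutive records in the read()
 * buffer stay naturally aligned.
 */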

/*
 * Get one queued event if it exists and is small enough to fit in "count".
 * Return an error pointer if "count" is not large enough to hold it.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * Round up the name length (plus one byte for the terminating '\0')
	 * so that the whole record is a multiple of event_size.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the file name, so here we have to send the
	 * name and then pad it out to a multiple of sizeof(struct
	 * inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the file name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
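
/*
 * Illustrative layout of what userspace ends up reading for a single
 * IN_CREATE of "ab.txt" on a watched directory (sizes assume the common
 * 16-byte struct inotify_event; purely an example, not extra protocol):
 *
 *	bytes  0..15	struct inotify_event  { wd, mask, cookie, len = 16 }
 *	bytes 16..21	"ab.txt"
 *	bytes 22..31	'\0' padding up to the rounded len
 *
 * The next queued event, if any, starts immediately after the padding.
 */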

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
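
/*
 * Usage sketch of the read() side from userspace (assumes the standard
 * <sys/inotify.h> event layout; for illustration only):
 *
 *	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len; ) {
 *		const struct inotify_event *ev = (const struct inotify_event *)p;
 *		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 *
 * A read() with a buffer smaller than the next queued record fails with
 * EINVAL (see get_one_event() above), which is why callers are expected
 * to pass a reasonably large buffer.
 */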

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/*
	 * Free this group; this matches the get from
	 * fsnotify_alloc_user_group() in inotify_new_group().
	 */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
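
/*
 * Usage sketch for the FIONREAD path above (userspace, illustration only):
 * it reports how many bytes a subsequent read() could return, which is
 * handy for sizing the buffer up front.
 *
 *	int pending = 0;
 *	if (ioctl(inotify_fd, FIONREAD, &pending) == 0 && pending > 0)
 *		;	// allocate a buffer of at least 'pending' bytes
 */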

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
						unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = path_permission(path, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
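
/*
 * Descriptive note: watch descriptors are handed out cyclically starting
 * at 1, so a fresh instance typically sees wd 1, 2, 3, ... and a wd is not
 * reused until the counter wraps.  The INOTIFY_IOC_SETNEXTWD ioctl above
 * lets checkpoint/restore tools move the cursor so that restored watches
 * keep their original numbers.
 */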

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's not the mark
	 * we were told to remove.  Something has gone badly wrong in the wd
	 * bookkeeping.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover from bad ref counting */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_inode_event(fsn_mark, FS_IN_IGNORED, NULL, NULL, NULL,
				   0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(inode, arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
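
/*
 * Behaviour summary of the helper above from a caller's point of view
 * (illustrative, mirroring inotify_add_watch(2) semantics):
 *
 *	inotify_add_watch(fd, path, IN_MODIFY);			// replaces the mask
 *	inotify_add_watch(fd, path, IN_ATTRIB | IN_MASK_ADD);	// ORs it in
 *	inotify_add_watch(fd, path, IN_OPEN | IN_MASK_CREATE);	// -EEXIST if a
 *								// watch already exists
 */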

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(inode, arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_user_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_user_group() took a reference on the group; it is
	 * dropped when the inotify fd is released.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}
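
/*
 * Reader's note: IN_CLOEXEC and IN_NONBLOCK are numerically identical to
 * O_CLOEXEC and O_NONBLOCK (enforced by the BUILD_BUG_ONs above), so they
 * can be passed straight through to anon_inode_getfd().  A typical
 * userspace call is simply:
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 */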

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
			(mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
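
/*
 * Usage sketch (userspace, illustration only): watch a directory for
 * creations and deletions, following the usual inotify(7) flow.
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp/watched",
 *				   IN_CREATE | IN_DELETE | IN_ONLYDIR);
 *	if (wd < 0)
 *		perror("inotify_add_watch");
 *	// ... read() events, then inotify_rm_watch(fd, wd) when done
 */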

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
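
/*
 * Descriptive note: tearing down the mark via fsnotify_destroy_mark()
 * ends up in inotify_ignored_and_remove_idr(), so userspace observes a
 * final IN_IGNORED event for the removed wd before it disappears.  The
 * same IN_IGNORED is delivered when a watched inode is deleted or its
 * filesystem is unmounted.
 */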

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	unsigned long watches_max;
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allow up to 1% of addressable memory to be allocated for inotify
	 * watches (per user) limited to the range [8192, 1048576].
	 */
	watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
			INOTIFY_WATCH_COST;
	watches_max = clamp(watches_max, 8192UL, 1048576UL);
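
	/*
	 * Rough worked example (illustrative numbers only; the real values
	 * depend on PAGE_SHIFT and the config-dependent struct sizes): with
	 * 4 GiB of lowmem, 1% is about 43 MB, and with a per-watch cost in
	 * the low kilobytes that works out to a few tens of thousands of
	 * watches, comfortably inside the [8192, 1048576] clamp.
	 */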

	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;

	return 0;
}
fs_initcall(inotify_user_setup);