// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept their own ignored event, care about
	 * children, and should receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

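/*
 * For example, struct inotify_event is 16 bytes, so round_event_name_len()
 * pads a 5-byte name to roundup(5 + 1, 16) == 16 bytes and a 16-byte name
 * to roundup(16 + 1, 16) == 32 bytes; the "+ 1" covers the terminating '\0'.
 */
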
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if the
 * buffer is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

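/*
 * The FIONREAD ioctl above lets userspace size its read buffer before
 * reading. A minimal sketch (illustrative only; "inotify_fd" names a
 * descriptor returned by inotify_init, <sys/ioctl.h> assumed):
 *
 *	int avail = 0;
 *	ioctl(inotify_fd, FIONREAD, &avail);
 *	(avail is the number of bytes the currently queued events occupy)
 */
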
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			  __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			  __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			  "found_i_mark=%p found_i_mark->wd=%d "
			  "found_i_mark->group=%p\n", __func__, i_mark,
			  i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			  found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
		       __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting... */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

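/*
 * Update the mask of a watch that already exists for this group/inode pair.
 * With IN_MASK_ADD the requested events are OR'd into the existing mask,
 * otherwise the mask is replaced.  With IN_MASK_CREATE the caller asked for
 * a new watch only, so an existing mark makes us fail with -EEXIST.  Returns
 * the existing watch descriptor on success, or -ENOENT if no mark exists yet.
 */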
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group took a reference to the group; we put it
	 * when the file is released.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

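/*
 * Minimal userspace sketch of the syscalls above (illustrative only; assumes
 * the libc wrappers in <sys/inotify.h>, plus <unistd.h> and <stdio.h>, with
 * error handling omitted and "/tmp" standing in for any watched path):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */
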
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);