// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
{
	__u32 mask;

	/*
	 * Every watch should receive its own IN_IGNORED event and events
	 * generated when the inode is unmounted. All directories care
	 * about events on their children.
	 */
	mask = (FS_IN_IGNORED | FS_UNMOUNT);
	if (S_ISDIR(inode->i_mode))
		mask |= FS_EVENT_ON_CHILD;

	/* keep only the event bits; drop the flags that control the watch */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
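/*
 * Worked example (illustrative, not new behaviour): for a directory
 * watched with arg == IN_CREATE, inotify_arg_to_mask() above yields
 * FS_IN_IGNORED | FS_UNMOUNT | FS_EVENT_ON_CHILD | FS_CREATE, and
 * inotify_mask_to_arg() below strips the kernel-internal bits again
 * before the mask is reported back to userspace in an event.
 */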
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

/*
 * Get an inotify_kernel_event if one exists and is small enough to fit
 * in "count". Return an error pointer if the buffer is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * Round up the name length, plus one byte for the terminating
	 * '\0', to a multiple of event_size.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
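/*
 * Worked example (illustrative): struct inotify_event is 16 bytes, so an
 * event for a file named "a.txt" (name_len == 5) gets
 * pad_name_len == roundup(5 + 1, 16) == 16, and copy_event_to_user()
 * writes 32 bytes total: the 16-byte header, the 5-byte name, and
 * 11 bytes of zero padding.
 */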
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching the get in inotify_new_group() */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
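/*
 * Note on inotify_read() above (summary, not new behaviour): a read drains
 * as many whole events as fit in the user buffer. If at least one event was
 * copied, the byte count is returned even when the next event no longer
 * fits; only a buffer too small for the very first event fails with
 * -EINVAL. This is why userspace is expected to read into a buffer of at
 * least sizeof(struct inotify_event) + NAME_MAX + 1 bytes.
 */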
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							  int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
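/*
 * Watch-descriptor lifecycle (summary of the helpers above):
 * idr_alloc_cyclic() hands out wds starting at 1 and keeps a cursor, so a
 * wd is typically not reused right after inotify_rm_watch() - e.g. after
 * watches 1 and 2 are added and 1 is removed, the next watch usually gets
 * wd 3, not 1. Each mark in the idr holds one reference; a lookup takes a
 * second one, which the caller must drop with fsnotify_put_mark().
 */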
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. Something went
	 * badly wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref grabbed by inotify_idr_find_locked().
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad refcounting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE,
			     NULL, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}
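/*
 * Note (summary of the above): watch teardown - whether explicit via
 * inotify_rm_watch(), implicit via IN_ONESHOT, or caused by the watched
 * inode going away - passes through inotify_ignored_and_remove_idr(), so
 * userspace can treat an IN_IGNORED event as the signal that a wd is dead
 * and should be forgotten (barring queue overflow, which drops events).
 */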
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(inode, arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(inode, arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
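/*
 * Semantics recap (illustrative): calling inotify_add_watch() twice on the
 * same inode replaces the mask unless IN_MASK_ADD is set, in which case the
 * bits are OR-ed together - e.g. IN_CREATE followed by
 * IN_MASK_ADD | IN_DELETE leaves the watch listening for both. With
 * IN_MASK_CREATE the second call instead fails with -EEXIST, as
 * implemented in inotify_update_existing_watch() above.
 */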
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	/* pre-allocate the overflow event so it can be queued later without allocating */
	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}

/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group() took a reference to the group; we put it
	 * when the file is released.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify. We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*. This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
				 (mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
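/*
 * Minimal userspace usage sketch for the syscalls above (illustrative
 * only, not kernel code):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
 *	ssize_t len = read(fd, buf, sizeof(buf));  // blocks for events
 *	inotify_rm_watch(fd, wd);                  // queues IN_IGNORED
 *	close(fd);                                 // releases the group
 */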
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);
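/*
 * Note: the defaults set above surface in /proc/sys/fs/inotify/ as
 * max_queued_events=16384, max_user_instances=128 and max_user_watches=8192,
 * each tunable at runtime via the sysctl table near the top of this file.
 */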