/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set
the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

/* shared lower bound for the minmax handlers below */
static int zero;

/* sysctl knobs exported under /proc/sys/fs/inotify/ */
ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

/*
 * Convert a userspace IN_* event mask into the internal FS_* mask stored
 * on a mark.  Only bits inside IN_ALL_EVENTS | IN_ONESHOT are honored;
 * IN_IGNORED delivery and interest in child events are always enabled.
 */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* everything should accept their own ignored and cares about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

/* convert an internal FS_* mask back into the IN_* bits reported to userspace */
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	/* queue emptiness must be checked under the notification mutex */
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	/* the name, when present, is padded to a multiple of the struct size */
	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	/* no private data means no wd to report; use -1 (e.g. queue overflow) */
	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.  The padding bytes are zeroed via clear_user() below.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}

/*
 * Read queued events into the user buffer.  Copies out as many complete
 * events as fit; blocks while the queue is empty unless O_NONBLOCK is set
 * or something has already been copied out.
 */
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			/* get_one_event() returns ERR_PTR(-EINVAL) when the
			 * next event does not fit in the remaining buffer */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		/* something was already copied; return it rather than sleep */
		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	/* a partial read still returns the byte count, except after -EFAULT */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

/* final fput of the inotify fd: tear down all marks and drop the group */
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		/* report how many bytes a read() would currently return */
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr;
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *found_ientry;
	int wd;

	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	wd = ientry->wd;

	/* wd == -1 means this mark was never (or is no longer) in the idr */
	if (wd == -1)
		goto out;

	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry))
		goto out;

	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	if (unlikely(found_ientry != ientry)) {
		/* We found an entry in the idr with the right wd, but it's
		 * not the entry we were told to remove.  eparis seriously
		 * fucked up somewhere. */
		WARN_ON(1);
		ientry->wd = -1;
		goto out;
	}

	/* One ref for being in the idr, one ref held by the caller */
	BUG_ON(atomic_read(&entry->refcnt) < 2);

	idr_remove(idr, wd);
	ientry->wd = -1;

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(entry);
out:
	spin_unlock(&group->inotify_data.idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
	/* on failure (e.g. event merged) the private data was not consumed */
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

/*
 * Update the mask of an existing watch on @inode.  Returns the wd on
 * success, -ENOENT if no mark exists (the caller then creates one), or
 * -EINVAL for a mask with no valid event bits.
 */
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	/* IN_MASK_ADD ORs into the existing mask; otherwise it is replaced */
	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}

/*
 * Create a brand new watch on @inode: allocate a mark, install it in the
 * group's idr, then attach it to the inode.  Returns the new wd, -EEXIST
 * if a mark raced into existence (caller retries the update path), or a
 * negative error.
 */
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	/* -1 marks "not yet in the idr" for inotify_remove_from_idr() */
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;
retry:
	ret = -ENOMEM;
	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
		goto out_err;

	/* we are putting the mark on the idr, take a reference */
	fsnotify_get_mark(&tmp_ientry->fsn_entry);

	spin_lock(&group->inotify_data.idr_lock);
	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
				group->inotify_data.last_wd+1,
				&tmp_ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	if (ret) {
		/* we didn't get on the idr, drop the idr reference */
		fsnotify_put_mark(&tmp_ientry->fsn_entry);

		/* idr was out of memory allocate and try again */
		if (ret == -EAGAIN)
			goto retry;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* update the idr hint, who cares about races, it's just a hint */
	group->inotify_data.last_wd = tmp_ientry->wd;

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* if this mark added a new event update the group mask */
	if (mask & ~group->mask)
		fsnotify_recalc_group_mask(group);

out_err:
	/* match the ref from fsnotify_init_markentry() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * here, go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}

/* allocate and initialize a fresh fsnotify group for one inotify instance */
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	atomic_inc(&user->inotify_devs);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret >= 0)
		return ret;

	/* anon_inode_getfd() failed: unwind the per-user instance count */
	atomic_dec(&user->inotify_devs);
out_free_uid:
	free_uid(user);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	/* take a reference under the idr lock so the mark can't vanish
	 * between lookup and destroy */
	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	/* SLAB_PANIC: cache creation failure panics rather than returning */
	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);