/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *      John McCutchan <ttb@tentacle.dhs.org>
 *      Robert Love <rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>

#include <asm/ioctls.h>

static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_user_watches __read_mostly;
static int inotify_max_queued_events __read_mostly;

/*
 * Lock ordering:
 *
 * inotify_dev->up_mutex (ensures we don't re-add the same watch)
 *      inode->inotify_mutex (protects inode's watch list)
 *              inotify_handle->mutex (protects inotify_handle's watch list)
 *                      inotify_dev->ev_mutex (protects device's event queue)
 */

/*
 * Lifetimes of the main data structures:
 *
 * inotify_device: Lifetime is managed by reference count, from
 * sys_inotify_init() until release.  Additional references can bump the count
 * via get_inotify_dev() and drop the count via put_inotify_dev().
 *
 * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
 * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
 * first event, or to inotify_destroy().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * The event queue is protected by 'ev_mutex'; watch updates are serialized
 * by 'up_mutex'.
 */
struct inotify_device {
        wait_queue_head_t wq;           /* wait queue for i/o */
        struct mutex ev_mutex;          /* protects event queue */
        struct mutex up_mutex;          /* synchronizes watch updates */
        struct list_head events;        /* list of queued events */
        struct user_struct *user;       /* user who opened this dev */
        struct inotify_handle *ih;      /* inotify handle */
        struct fasync_struct *fa;       /* async notification */
        atomic_t count;                 /* reference count */
        unsigned int queue_size;        /* size of the queue (bytes) */
        unsigned int event_count;       /* number of pending events */
        unsigned int max_events;        /* maximum number of events */
};
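
/*
 * Event queue accounting: every queued event adds sizeof(struct inotify_event)
 * plus event.len bytes to 'queue_size' and bumps 'event_count' by one.  Once
 * 'event_count' reaches 'max_events', a single IN_Q_OVERFLOW event is queued
 * and further events are dropped until the queue drains (see
 * inotify_dev_queue_event() below).  FIONREAD on the inotify fd reports
 * 'queue_size'.
 */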

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
        struct inotify_event event;     /* the user-space event */
        struct list_head list;          /* entry in inotify_device's list */
        char *name;                     /* filename, if any */
};

/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
        struct inotify_device *dev;     /* associated device */
        struct inotify_watch wdata;     /* inotify watch data */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
        {
                .ctl_name = INOTIFY_MAX_USER_INSTANCES,
                .procname = "max_user_instances",
                .data = &inotify_max_user_instances,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy = &sysctl_intvec,
                .extra1 = &zero,
        },
        {
                .ctl_name = INOTIFY_MAX_USER_WATCHES,
                .procname = "max_user_watches",
                .data = &inotify_max_user_watches,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy = &sysctl_intvec,
                .extra1 = &zero,
        },
        {
                .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
                .procname = "max_queued_events",
                .data = &inotify_max_queued_events,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy = &sysctl_intvec,
                .extra1 = &zero
        },
        { .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
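
/*
 * With CONFIG_SYSCTL, the table above shows up as
 * /proc/sys/fs/inotify/max_user_instances, max_user_watches and
 * max_queued_events, each with a lower bound of zero.  The boot-time
 * defaults (16384 queued events, 128 instances and 8192 watches per user)
 * are set in inotify_user_setup() at the bottom of this file.
 */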

static inline void get_inotify_dev(struct inotify_device *dev)
{
        atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
        if (atomic_dec_and_test(&dev->count)) {
                atomic_dec(&dev->user->inotify_devs);
                free_uid(dev->user);
                kfree(dev);
        }
}

/*
 * free_inotify_user_watch - cleans up the watch and its references
 */
static void free_inotify_user_watch(struct inotify_watch *w)
{
        struct inotify_user_watch *watch;
        struct inotify_device *dev;

        watch = container_of(w, struct inotify_user_watch, wdata);
        dev = watch->dev;

        atomic_dec(&dev->user->inotify_watches);
        put_inotify_dev(dev);
        kmem_cache_free(watch_cachep, watch);
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
                                                  const char *name)
{
        struct inotify_kernel_event *kevent;

        kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
        if (unlikely(!kevent))
                return NULL;

        /* we hand this out to user-space, so zero it just in case */
        memset(&kevent->event, 0, sizeof(struct inotify_event));

        kevent->event.wd = wd;
        kevent->event.mask = mask;
        kevent->event.cookie = cookie;

        INIT_LIST_HEAD(&kevent->list);

        if (name) {
                size_t len, rem, event_size = sizeof(struct inotify_event);

                /*
                 * We need to pad the filename so as to properly align an
                 * array of inotify_event structures.  Because the structure is
                 * small and the common case is a small filename, we just round
                 * up to the next multiple of the structure's sizeof.  This is
                 * simple and safe for all architectures.
                 */
                len = strlen(name) + 1;
                rem = event_size - len;
                if (len > event_size) {
                        rem = event_size - (len % event_size);
                        if (len % event_size == 0)
                                rem = 0;
                }

                kevent->name = kmalloc(len + rem, GFP_NOFS);
                if (unlikely(!kevent->name)) {
                        kmem_cache_free(event_cachep, kevent);
                        return NULL;
                }
                memcpy(kevent->name, name, len);
                if (rem)
                        memset(kevent->name + len, 0, rem);
                kevent->event.len = len + rem;
        } else {
                kevent->event.len = 0;
                kevent->name = NULL;
        }

        return kevent;
}
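
/*
 * A worked example of the padding above, assuming the common 16-byte
 * sizeof(struct inotify_event): for "foo", len = 4 and rem = 12, so
 * event.len = 16; for a 20-byte name (including the trailing NUL),
 * rem = 16 - (20 % 16) = 12, so event.len = 32; a name of exactly 16 bytes
 * gets rem = 0 and event.len = 16.  The padding bytes are zero-filled.
 */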

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
        return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_get_last_event - return the last event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_last_event(struct inotify_device *dev)
{
        if (list_empty(&dev->events))
                return NULL;
        return list_entry(dev->events.prev, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - event handler registered with core inotify, adds
 * a new event to the given device
 *
 * Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
                                    u32 cookie, const char *name,
                                    struct inode *ignored)
{
        struct inotify_user_watch *watch;
        struct inotify_device *dev;
        struct inotify_kernel_event *kevent, *last;

        watch = container_of(w, struct inotify_user_watch, wdata);
        dev = watch->dev;

        mutex_lock(&dev->ev_mutex);

        /* we can safely put the watch as we don't reference it while
         * generating the event
         */
        if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
                put_inotify_watch(w); /* final put */

        /* coalescing: drop this event if it is a dupe of the previous */
        last = inotify_dev_get_last_event(dev);
        if (last && last->event.mask == mask && last->event.wd == wd &&
            last->event.cookie == cookie) {
                const char *lastname = last->name;

                if (!name && !lastname)
                        goto out;
                if (name && lastname && !strcmp(lastname, name))
                        goto out;
        }

        /* the queue overflowed and we already sent the Q_OVERFLOW event */
        if (unlikely(dev->event_count > dev->max_events))
                goto out;

        /* if the queue overflows, we need to notify user space */
        if (unlikely(dev->event_count == dev->max_events))
                kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
        else
                kevent = kernel_event(wd, mask, cookie, name);

        if (unlikely(!kevent))
                goto out;

        /* queue the event and wake up anyone waiting */
        dev->event_count++;
        dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
        list_add_tail(&kevent->list, &dev->events);
        wake_up_interruptible(&dev->wq);
        kill_fasync(&dev->fa, SIGIO, POLL_IN);

out:
        mutex_unlock(&dev->ev_mutex);
}
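
/*
 * For example, two back-to-back IN_MODIFY events for the same watch and the
 * same filename collapse into a single queued event, which is why a process
 * rewriting a file in a tight loop may observe fewer events than writes.  On
 * overflow, user-space receives exactly one event with wd == -1 and mask ==
 * IN_Q_OVERFLOW, signalling that subsequent events were dropped.
 */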

/*
 * remove_kevent - cleans up the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
static void remove_kevent(struct inotify_device *dev,
                          struct inotify_kernel_event *kevent)
{
        list_del(&kevent->list);

        dev->event_count--;
        dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
}

/*
 * free_kevent - frees the given kevent.
 */
static void free_kevent(struct inotify_kernel_event *kevent)
{
        kfree(kevent->name);
        kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->ev_mutex.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
        if (!list_empty(&dev->events)) {
                struct inotify_kernel_event *kevent;
                kevent = inotify_dev_get_event(dev);
                remove_kevent(dev, kevent);
                free_kevent(kevent);
        }
}

/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int find_inode(const char __user *dirname, struct path *path,
                      unsigned flags)
{
        int error;

        error = user_path_at(AT_FDCWD, dirname, flags, path);
        if (error)
                return error;
        /* you can only watch an inode if you have read permissions on it */
        error = inode_permission(path->dentry->d_inode, MAY_READ);
        if (error)
                path_put(path);
        return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
                        u32 mask)
{
        struct inotify_user_watch *watch;
        int ret;

        if (atomic_read(&dev->user->inotify_watches) >=
            inotify_max_user_watches)
                return -ENOSPC;

        watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
        if (unlikely(!watch))
                return -ENOMEM;

        /* save a reference to device and bump the count to make it official */
        get_inotify_dev(dev);
        watch->dev = dev;

        atomic_inc(&dev->user->inotify_watches);

        inotify_init_watch(&watch->wdata);
        ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
        if (ret < 0)
                free_inotify_user_watch(&watch->wdata);

        return ret;
}

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
        struct inotify_device *dev = file->private_data;
        int ret = 0;

        poll_wait(file, &dev->wq, wait);
        mutex_lock(&dev->ev_mutex);
        if (!list_empty(&dev->events))
                ret = POLLIN | POLLRDNORM;
        mutex_unlock(&dev->ev_mutex);

        return ret;
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count".  Return an error pointer if
 * not large enough.
 *
 * Called with the device ev_mutex held.
 */
static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
                                                  size_t count)
{
        size_t event_size = sizeof(struct inotify_event);
        struct inotify_kernel_event *kevent;

        if (list_empty(&dev->events))
                return NULL;

        kevent = inotify_dev_get_event(dev);
        if (kevent->name)
                event_size += kevent->event.len;

        if (event_size > count)
                return ERR_PTR(-EINVAL);

        remove_kevent(dev, kevent);
        return kevent;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
                                  char __user *buf)
{
        size_t event_size = sizeof(struct inotify_event);

        if (copy_to_user(buf, &kevent->event, event_size))
                return -EFAULT;

        if (kevent->name) {
                buf += event_size;

                if (copy_to_user(buf, kevent->name, kevent->event.len))
                        return -EFAULT;

                event_size += kevent->event.len;
        }
        return event_size;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct inotify_device *dev;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        dev = file->private_data;

        while (1) {
                struct inotify_kernel_event *kevent;

                prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

                mutex_lock(&dev->ev_mutex);
                kevent = get_one_event(dev, count);
                mutex_unlock(&dev->ev_mutex);

                if (kevent) {
                        ret = PTR_ERR(kevent);
                        if (IS_ERR(kevent))
                                break;
                        ret = copy_event_to_user(kevent, buf);
                        free_kevent(kevent);
                        if (ret < 0)
                                break;
                        buf += ret;
                        count -= ret;
                        continue;
                }

                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
                        break;
                ret = -EINTR;
                if (signal_pending(current))
                        break;

                if (start != buf)
                        break;

                schedule();
        }

        finish_wait(&dev->wq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}
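
/*
 * The byte stream returned by read() is therefore a sequence of struct
 * inotify_event headers, each immediately followed by event.len bytes of
 * NUL-padded name.  If the buffer cannot hold even the first pending event,
 * read() fails with -EINVAL instead of returning a truncated event; once at
 * least one event has been copied, read() simply stops and returns the
 * number of bytes written so far.
 */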

static int inotify_fasync(int fd, struct file *file, int on)
{
        struct inotify_device *dev = file->private_data;

        return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
        struct inotify_device *dev = file->private_data;

        inotify_destroy(dev->ih);

        /* destroy all of the events on this device */
        mutex_lock(&dev->ev_mutex);
        while (!list_empty(&dev->events))
                inotify_dev_event_dequeue(dev);
        mutex_unlock(&dev->ev_mutex);

        /* free this device: the put matching the get in inotify_init() */
        put_inotify_dev(dev);

        return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct inotify_device *dev;
        void __user *p;
        int ret = -ENOTTY;

        dev = file->private_data;
        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                ret = put_user(dev->queue_size, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations inotify_fops = {
        .poll           = inotify_poll,
        .read           = inotify_read,
        .fasync         = inotify_fasync,
        .release        = inotify_release,
        .unlocked_ioctl = inotify_ioctl,
        .compat_ioctl   = inotify_ioctl,
};

static const struct inotify_operations inotify_user_ops = {
        .handle_event   = inotify_dev_queue_event,
        .destroy_watch  = free_inotify_user_watch,
};

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
        struct inotify_device *dev;
        struct inotify_handle *ih;
        struct user_struct *user;
        struct file *filp;
        int fd, ret;

        /* Check the IN_* constants for consistency.  */
        BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

        if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
                return -EINVAL;

        fd = get_unused_fd_flags(flags & O_CLOEXEC);
        if (fd < 0)
                return fd;

        filp = get_empty_filp();
        if (!filp) {
                ret = -ENFILE;
                goto out_put_fd;
        }

        user = get_current_user();
        if (unlikely(atomic_read(&user->inotify_devs) >=
                     inotify_max_user_instances)) {
                ret = -EMFILE;
                goto out_free_uid;
        }

        dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
        if (unlikely(!dev)) {
                ret = -ENOMEM;
                goto out_free_uid;
        }

        ih = inotify_init(&inotify_user_ops);
        if (IS_ERR(ih)) {
                ret = PTR_ERR(ih);
                goto out_free_dev;
        }
        dev->ih = ih;
        dev->fa = NULL;

        filp->f_op = &inotify_fops;
        filp->f_path.mnt = mntget(inotify_mnt);
        filp->f_path.dentry = dget(inotify_mnt->mnt_root);
        filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
        filp->f_mode = FMODE_READ;
        filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
        filp->private_data = dev;

        INIT_LIST_HEAD(&dev->events);
        init_waitqueue_head(&dev->wq);
        mutex_init(&dev->ev_mutex);
        mutex_init(&dev->up_mutex);
        dev->event_count = 0;
        dev->queue_size = 0;
        dev->max_events = inotify_max_queued_events;
        dev->user = user;
        atomic_set(&dev->count, 0);

        get_inotify_dev(dev);
        atomic_inc(&user->inotify_devs);
        fd_install(fd, filp);

        return fd;
out_free_dev:
        kfree(dev);
out_free_uid:
        free_uid(user);
        put_filp(filp);
out_put_fd:
        put_unused_fd(fd);
        return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
        return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
                u32, mask)
{
        struct inode *inode;
        struct inotify_device *dev;
        struct path path;
        struct file *filp;
        int ret, fput_needed;
        unsigned flags = 0;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto fput_and_out;
        }

        if (!(mask & IN_DONT_FOLLOW))
                flags |= LOOKUP_FOLLOW;
        if (mask & IN_ONLYDIR)
                flags |= LOOKUP_DIRECTORY;

        ret = find_inode(pathname, &path, flags);
        if (unlikely(ret))
                goto fput_and_out;

        /* inode held in place by reference to path; dev by fget on fd */
        inode = path.dentry->d_inode;
        dev = filp->private_data;

        mutex_lock(&dev->up_mutex);
        ret = inotify_find_update_watch(dev->ih, inode, mask);
        if (ret == -ENOENT)
                ret = create_watch(dev, inode, mask);
        mutex_unlock(&dev->up_mutex);

        path_put(&path);
fput_and_out:
        fput_light(filp, fput_needed);
        return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
        struct file *filp;
        struct inotify_device *dev;
        int ret, fput_needed;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto out;
        }

        dev = filp->private_data;

        /* we free our watch data when we get IN_IGNORED */
        ret = inotify_rm_wd(dev->ih, wd);

out:
        fput_light(filp, fput_needed);
        return ret;
}
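
/*
 * A minimal sketch of the user-space side of the three syscalls above
 * (illustrative only, error handling omitted; "/tmp" is just an example):
 *
 *      int fd = inotify_init1(IN_NONBLOCK);
 *      int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *      char buf[4096];
 *      ssize_t n = read(fd, buf, sizeof(buf));
 *      // buf now holds struct inotify_event records, each followed by
 *      // event->len bytes of name; advance through the buffer by
 *      // sizeof(struct inotify_event) + event->len per record.
 *      inotify_rm_watch(fd, wd);
 *      close(fd);
 */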

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
               const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_pseudo(fs_type, "inotify", NULL,
                             INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
        .name    = "inotifyfs",
        .get_sb  = inotify_get_sb,
        .kill_sb = kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot
 * return error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init inotify_user_setup(void)
{
        int ret;

        ret = register_filesystem(&inotify_fs_type);
        if (unlikely(ret))
                panic("inotify: register_filesystem returned %d!\n", ret);

        inotify_mnt = kern_mount(&inotify_fs_type);
        if (IS_ERR(inotify_mnt))
                panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

        inotify_max_queued_events = 16384;
        inotify_max_user_instances = 128;
        inotify_max_user_watches = 8192;

        watch_cachep = kmem_cache_create("inotify_watch_cache",
                                         sizeof(struct inotify_user_watch),
                                         0, SLAB_PANIC, NULL);
        event_cachep = kmem_cache_create("inotify_event_cache",
                                         sizeof(struct inotify_kernel_event),
                                         0, SLAB_PANIC, NULL);

        return 0;
}

module_init(inotify_user_setup);