/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>

#include "inotify.h"

static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int wd, ret;

	to_tell = event->to_tell;

	spin_lock(&to_tell->i_lock);
	entry = fsnotify_find_mark_entry(group, to_tell);
	spin_unlock(&to_tell->i_lock);
	/* race with watch removal?  We already passed should_send */
	if (unlikely(!entry))
		return 0;
	ientry = container_of(entry, struct inotify_inode_mark_entry,
			      fsn_entry);
	wd = ientry->wd;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		return -ENOMEM;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = wd;

	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
	if (ret) {
		inotify_free_event_priv(fsn_event_priv);
		/* EEXIST says we tail matched, EOVERFLOW isn't something
		 * to report up the stack. */
		if ((ret == -EEXIST) ||
		    (ret == -EOVERFLOW))
			ret = 0;
	}

	/*
	 * If we hold the entry until after the event is on the queue,
	 * IN_IGNORED won't be able to pass this event in the queue.
	 */
	fsnotify_put_mark(entry);

	return ret;
}

static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(entry, group);
}

static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	bool send;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return false;

	mask = (mask & ~FS_EVENT_ON_CHILD);
	send = (entry->mask & mask);

	/* find took a reference */
	fsnotify_put_mark(entry);

	return send;
}
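
/*
 * Worked example (added note, not part of the original file) for the check
 * in inotify_should_send_event(): a directory watched with IN_MODIFY gets a
 * mark whose mask includes FS_MODIFY (inotify also sets internal bits such
 * as FS_EVENT_ON_CHILD and FS_IN_IGNORED when the watch is created).  A
 * modification of a child of that directory reaches us with the event mask
 * (FS_MODIFY | FS_EVENT_ON_CHILD):
 *
 *	mask = (FS_MODIFY | FS_EVENT_ON_CHILD) & ~FS_EVENT_ON_CHILD;   -> FS_MODIFY
 *	send = entry->mask & mask;                                     -> non-zero, true
 *
 * An FS_OPEN event, by contrast, leaves no overlapping bits and is dropped
 * for this group without ever reaching inotify_handle_event().
 */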

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	static bool warned = false;

	if (warned)
		return 0;

	warned = true;
	entry = p;
	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (entry)
		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
		       entry->group, entry->inode, ientry->wd);
	return 0;
}

static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the WARN in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
}

void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
	struct inotify_event_private_data *event_priv;

	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
				  fsnotify_event_priv_data);

	kmem_cache_free(event_priv_cachep, event_priv);
}

const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};
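
/*
 * Userspace-facing sketch (added note, not part of the original file; it
 * assumes only the standard inotify(7) API): inotify_init() creates the
 * fsnotify_group whose ops are the inotify_fsnotify_ops table above,
 * inotify_add_watch() creates the mark that inotify_should_send_event()
 * and inotify_handle_event() look up, and read() drains events whose wd
 * field is the ientry->wd stashed into the event's private data by
 * inotify_handle_event().
 *
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));	// blocks until an event arrives
 *	struct inotify_event *ev = (struct inotify_event *)buf;
 *	// ev->wd == wd for events generated by this watch
 */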