// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Directory notifications for Linux.
 *
 * Copyright (C) 2000,2001,2002 Stephen Rothwell
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * dnotify was largely rewritten to use the new fsnotify infrastructure
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/dnotify.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>

/* Toggled from userspace via the fs.dir-notify-enable sysctl. */
int dir_notify_enable __read_mostly = 1;

static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;

/*
 * dnotify will attach one of these to each inode (i_fsnotify_marks) which
 * is being watched by dnotify. If multiple userspace applications are watching
 * the same directory with dnotify, their information is chained in ->dn.
 */
struct dnotify_mark {
	struct fsnotify_mark fsn_mark;
	struct dnotify_struct *dn;
};

/*
 * When a process starts or stops watching an inode, the set of events which
 * dnotify cares about for that inode may change. This function walks the
 * list of everything receiving dnotify events about this directory and
 * calculates the union of all those events. After it updates what dnotify is
 * interested in, it calls fsnotify so the set of all events relevant to this
 * inode can be updated as well.
 */
static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
	__u32 new_mask = 0;
	struct dnotify_struct *dn;
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	assert_spin_locked(&fsn_mark->lock);

	for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
		new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
	if (fsn_mark->mask == new_mask)
		return;
	fsn_mark->mask = new_mask;

	fsnotify_recalc_mask(fsn_mark->connector);
}
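/*
 * Worked example (illustrative only): if one watcher armed
 * DN_CREATE|DN_MULTISHOT and another armed DN_DELETE on the same directory,
 * their dnotify_structs carry
 *
 *	FS_EVENT_ON_CHILD | FS_CREATE | FS_MOVED_TO | FS_DN_MULTISHOT
 *	FS_EVENT_ON_CHILD | FS_DELETE | FS_MOVED_FROM
 *
 * (see convert_arg() below), so the recalculated mark mask is the union of
 * the two with FS_DN_MULTISHOT stripped out, since multishot only describes
 * the lifetime of a watch, not an event fsnotify should report.
 */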
/*
 * Main fsnotify callback where events are delivered to dnotify.
 * Find the dnotify mark on the relevant inode, run the list of dnotify structs
 * on that mark and determine which of them has expressed interest in receiving
 * events of this type. When one is found, send the owning process the right
 * signal and destroy the dnotify struct if it was not registered to receive
 * multiple events.
 */
static int dnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				u32 mask, const void *data, int data_type,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct fown_struct *fown;
	__u32 test_mask = mask & ~FS_EVENT_ON_CHILD;

	/* not a dir, dnotify doesn't care */
	if (!S_ISDIR(inode->i_mode))
		return 0;

	if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
		return 0;

	dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);

	spin_lock(&inode_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_mask & test_mask) == 0) {
			prev = &dn->dn_next;
			continue;
		}
		fown = &dn->dn_filp->f_owner;
		send_sigio(fown, dn->dn_fd, POLL_MSG);
		if (dn->dn_mask & FS_DN_MULTISHOT)
			prev = &dn->dn_next;
		else {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(inode_mark);
		}
	}

	spin_unlock(&inode_mark->lock);

	return 0;
}

static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	BUG_ON(dn_mark->dn);

	kmem_cache_free(dnotify_mark_cache, dn_mark);
}

static const struct fsnotify_ops dnotify_fsnotify_ops = {
	.handle_event = dnotify_handle_event,
	.free_mark = dnotify_free_mark,
};
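/*
 * Receiving side, for reference (illustrative userspace sketch, not part of
 * this file): with the default setup the watcher only gets a bare SIGIO.
 * If it chose a real-time signal with fcntl(fd, F_SETSIG, sig) and installed
 * its handler with SA_SIGINFO, the fd handed to send_sigio() above arrives
 * as si_fd:
 *
 *	static void dn_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		// si->si_fd is the fd of the watched directory that changed
 *	}
 *
 * dn_handler is a made-up name used only for illustration.
 */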
/*
 * Called every time a file is closed. Looks first for a dnotify mark on the
 * inode. If one is found, run all of the ->dn structures attached to that
 * mark looking for one relevant to the process closing the file and remove
 * that dnotify_struct. If that was the last dnotify_struct, also remove the
 * fsnotify_mark.
 */
void dnotify_flush(struct file *filp, fl_owner_t id)
{
	struct fsnotify_mark *fsn_mark;
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct inode *inode;
	bool free = false;

	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode))
		return;

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (!fsn_mark)
		return;
	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);

	mutex_lock(&dnotify_group->mark_mutex);

	spin_lock(&fsn_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(fsn_mark);
			break;
		}
		prev = &dn->dn_next;
	}

	spin_unlock(&fsn_mark->lock);

	/* nothing else could have found us thanks to the dnotify_group's
	   mark_mutex */
	if (dn_mark->dn == NULL) {
		fsnotify_detach_mark(fsn_mark);
		free = true;
	}

	mutex_unlock(&dnotify_group->mark_mutex);

	if (free)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
}

/* this conversion is done only at watch creation */
static __u32 convert_arg(unsigned long arg)
{
	__u32 new_mask = FS_EVENT_ON_CHILD;

	if (arg & DN_MULTISHOT)
		new_mask |= FS_DN_MULTISHOT;
	if (arg & DN_DELETE)
		new_mask |= (FS_DELETE | FS_MOVED_FROM);
	if (arg & DN_MODIFY)
		new_mask |= FS_MODIFY;
	if (arg & DN_ACCESS)
		new_mask |= FS_ACCESS;
	if (arg & DN_ATTRIB)
		new_mask |= FS_ATTRIB;
	if (arg & DN_RENAME)
		new_mask |= FS_DN_RENAME;
	if (arg & DN_CREATE)
		new_mask |= (FS_CREATE | FS_MOVED_TO);

	return new_mask;
}
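/*
 * For example, fcntl(fd, F_NOTIFY, DN_RENAME | DN_MULTISHOT) yields a
 * dn_mask of FS_EVENT_ON_CHILD | FS_DN_RENAME | FS_DN_MULTISHOT. Note that
 * DN_RENAME maps to the dnotify-specific FS_DN_RENAME, while DN_CREATE and
 * DN_DELETE each cover two fsnotify events so that files moved into or out
 * of the directory are reported as well.
 */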
/*
 * If multiple processes watch the same inode with dnotify there is only one
 * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
 * onto that mark. This function either attaches the new dnotify_struct onto
 * that list, or it ORs (|=) the mask into an existing dnotify_struct.
 */
static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
		     fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
	struct dnotify_struct *odn;

	odn = dn_mark->dn;
	while (odn != NULL) {
		/* adding more events to an existing dnotify_struct? */
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= mask;
			return -EEXIST;
		}
		odn = odn->dn_next;
	}

	dn->dn_mask = mask;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	dn->dn_next = dn_mark->dn;
	dn_mark->dn = dn;

	return 0;
}

/*
 * When a process calls fcntl to attach a dnotify watch to a directory it ends
 * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
 * attached to the fsnotify_mark.
 */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_mark *new_dn_mark, *dn_mark;
	struct fsnotify_mark *new_fsn_mark, *fsn_mark;
	struct dnotify_struct *dn;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int destroy = 0, error = 0;
	__u32 mask;

	/* we use these to tell if we need to kfree */
	new_fsn_mark = NULL;
	dn = NULL;

	if (!dir_notify_enable) {
		error = -EINVAL;
		goto out_err;
	}

	/* a 0 mask means we are explicitly removing the watch */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		error = 0;
		goto out_err;
	}

	/* dnotify only works on directories */
	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode)) {
		error = -ENOTDIR;
		goto out_err;
	}

	/* expect most fcntl to add new rather than augment old */
	dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
	if (!dn) {
		error = -ENOMEM;
		goto out_err;
	}

	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
	new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
	if (!new_dn_mark) {
		error = -ENOMEM;
		goto out_err;
	}

	/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
	mask = convert_arg(arg);

	/* set up the new_fsn_mark and new_dn_mark */
	new_fsn_mark = &new_dn_mark->fsn_mark;
	fsnotify_init_mark(new_fsn_mark, dnotify_group);
	new_fsn_mark->mask = mask;
	new_dn_mark->dn = NULL;

	/* this is needed to prevent the fcntl/close race described below */
	mutex_lock(&dnotify_group->mark_mutex);

	/* add the new_fsn_mark or find an old one. */
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (fsn_mark) {
		dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
		spin_lock(&fsn_mark->lock);
	} else {
		error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0);
		if (error) {
			mutex_unlock(&dnotify_group->mark_mutex);
			goto out_err;
		}
		spin_lock(&new_fsn_mark->lock);
		fsn_mark = new_fsn_mark;
		dn_mark = new_dn_mark;
		/* we used new_fsn_mark, so don't free it */
		new_fsn_mark = NULL;
	}

	rcu_read_lock();
	f = fcheck(fd);
	rcu_read_unlock();

	/* if (f != filp) means that we lost a race and another task/thread
	 * actually closed the fd we are still playing with before we grabbed
	 * the dnotify_group's mark_mutex and fsn_mark->lock. Since closing the
	 * fd is the only time marks get cleaned up, we need to get our mark
	 * off the list. */
	if (f != filp) {
		/* if we added ourselves, shoot ourselves; it's possible that
		 * the flush actually did shoot this fsn_mark. That's fine too
		 * since multiple calls to destroy_mark are perfectly safe. If
		 * we found a dn_mark already attached to the inode, just sod
		 * off silently as the flush at close time dealt with it.
		 */
		if (dn_mark == new_dn_mark)
			destroy = 1;
		error = 0;
		goto out;
	}

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);

	error = attach_dn(dn, dn_mark, id, fd, filp, mask);
	/* !error means that we attached the dn to the dn_mark, so don't free it */
	if (!error)
		dn = NULL;
	/* -EEXIST means that we didn't add this new dn and used an old one.
	 * That isn't an error (and the unused dn should be freed) */
	else if (error == -EEXIST)
		error = 0;

	dnotify_recalc_inode_mask(fsn_mark);
out:
	spin_unlock(&fsn_mark->lock);

	if (destroy)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&dnotify_group->mark_mutex);
	if (destroy)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
out_err:
	if (new_fsn_mark)
		fsnotify_put_mark(new_fsn_mark);
	if (dn)
		kmem_cache_free(dnotify_struct_cache, dn);
	return error;
}
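/*
 * Illustrative userspace sketch of how this entry point is reached (not part
 * of this file; it assumes a handler like dn_handler() from the sketch above
 * is installed with SA_SIGINFO for SIGRTMIN + 1):
 *
 *	int dirfd = open("/some/dir", O_RDONLY | O_DIRECTORY);
 *
 *	fcntl(dirfd, F_SETSIG, SIGRTMIN + 1);
 *	fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MULTISHOT);
 *
 * The F_NOTIFY fcntl() lands here with arg == DN_CREATE|DN_DELETE|DN_MULTISHOT.
 * The watch is removed either explicitly with fcntl(dirfd, F_NOTIFY, 0) or
 * when the watching process closes the descriptor; both paths end up in
 * dnotify_flush().
 */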
static int __init dnotify_init(void)
{
	dnotify_struct_cache = KMEM_CACHE(dnotify_struct,
					  SLAB_PANIC|SLAB_ACCOUNT);
	dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT);

	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
	if (IS_ERR(dnotify_group))
		panic("unable to allocate fsnotify group for dnotify\n");
	return 0;
}

module_init(dnotify_init)