// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Directory notifications for Linux.
 *
 * Copyright (C) 2000,2001,2002 Stephen Rothwell
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * dnotify was largely rewritten to use the new fsnotify infrastructure
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/dnotify.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>

int dir_notify_enable __read_mostly = 1;

static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;

/*
 * dnotify will attach one of these to each inode (i_fsnotify_marks) which
 * is being watched by dnotify.  If multiple userspace applications are
 * watching the same directory with dnotify their information is chained
 * in ->dn.
 */
struct dnotify_mark {
	struct fsnotify_mark fsn_mark;
	struct dnotify_struct *dn;
};

/*
 * When a process starts or stops watching an inode the set of events which
 * dnotify cares about for that inode may change.  This function runs the
 * list of everything receiving dnotify events about this directory and
 * calculates the set of all those events.  After it updates what dnotify
 * is interested in it calls the fsnotify function so it can update the set
 * of all events relevant to this inode.
 */
static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
	__u32 new_mask = 0;
	struct dnotify_struct *dn;
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	assert_spin_locked(&fsn_mark->lock);

	for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
		new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
	if (fsn_mark->mask == new_mask)
		return;
	fsn_mark->mask = new_mask;

	fsnotify_recalc_mask(fsn_mark->connector);
}

/*
 * Main fsnotify call where events are delivered to dnotify.
 * Find the dnotify mark on the relevant inode, run the list of dnotify structs
 * on that mark and determine which of them has expressed interest in receiving
 * events of this type.  When one is found, send the owning process its signal
 * and destroy the dnotify struct if it was not registered to receive multiple
 * events.
 */
static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask,
				struct inode *inode, struct inode *dir,
				const struct qstr *name)
{
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct fown_struct *fown;
	__u32 test_mask = mask & ~FS_EVENT_ON_CHILD;

	/* not a dir, dnotify doesn't care */
	if (!dir && !(mask & FS_ISDIR))
		return 0;

	dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);

	spin_lock(&inode_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_mask & test_mask) == 0) {
			prev = &dn->dn_next;
			continue;
		}
		fown = &dn->dn_filp->f_owner;
		send_sigio(fown, dn->dn_fd, POLL_MSG);
		if (dn->dn_mask & FS_DN_MULTISHOT)
			prev = &dn->dn_next;
		else {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(inode_mark);
		}
	}

	spin_unlock(&inode_mark->lock);

	return 0;
}

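/*
 * ->free_mark() callback from dnotify_fsnotify_ops: fsnotify calls this once
 * the mark is no longer in use.  Every dnotify_struct must already have been
 * detached from the mark by then, hence the BUG_ON below.
 */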
static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct dnotify_mark *dn_mark = container_of(fsn_mark,
						    struct dnotify_mark,
						    fsn_mark);

	BUG_ON(dn_mark->dn);

	kmem_cache_free(dnotify_mark_cache, dn_mark);
}

static const struct fsnotify_ops dnotify_fsnotify_ops = {
	.handle_inode_event = dnotify_handle_event,
	.free_mark = dnotify_free_mark,
};

/*
 * Called every time a file is closed.  Looks first for a dnotify mark on the
 * inode.  If one is found, run through the ->dn structures attached to that
 * mark looking for the one relevant to this process closing the file, and
 * remove that dnotify_struct.  If that was the last dnotify_struct, also
 * remove the fsnotify_mark.
 */
void dnotify_flush(struct file *filp, fl_owner_t id)
{
	struct fsnotify_mark *fsn_mark;
	struct dnotify_mark *dn_mark;
	struct dnotify_struct *dn;
	struct dnotify_struct **prev;
	struct inode *inode;
	bool free = false;

	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode))
		return;

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (!fsn_mark)
		return;
	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);

	mutex_lock(&dnotify_group->mark_mutex);

	spin_lock(&fsn_mark->lock);
	prev = &dn_mark->dn;
	while ((dn = *prev) != NULL) {
		if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
			*prev = dn->dn_next;
			kmem_cache_free(dnotify_struct_cache, dn);
			dnotify_recalc_inode_mask(fsn_mark);
			break;
		}
		prev = &dn->dn_next;
	}

	spin_unlock(&fsn_mark->lock);

	/* nothing else could have found us thanks to the dnotify_group's
	   mark_mutex */
	if (dn_mark->dn == NULL) {
		fsnotify_detach_mark(fsn_mark);
		free = true;
	}

	mutex_unlock(&dnotify_group->mark_mutex);

	if (free)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
}

/* this conversion is done only at watch creation */
static __u32 convert_arg(unsigned long arg)
{
	__u32 new_mask = FS_EVENT_ON_CHILD;

	if (arg & DN_MULTISHOT)
		new_mask |= FS_DN_MULTISHOT;
	if (arg & DN_DELETE)
		new_mask |= (FS_DELETE | FS_MOVED_FROM);
	if (arg & DN_MODIFY)
		new_mask |= FS_MODIFY;
	if (arg & DN_ACCESS)
		new_mask |= FS_ACCESS;
	if (arg & DN_ATTRIB)
		new_mask |= FS_ATTRIB;
	if (arg & DN_RENAME)
		new_mask |= FS_DN_RENAME;
	if (arg & DN_CREATE)
		new_mask |= (FS_CREATE | FS_MOVED_TO);

	return new_mask;
}

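/*
 * For reference, userspace arms a dnotify watch with fcntl() roughly like
 * this (illustrative only, not part of this file):
 *
 *	int dfd = open("/some/dir", O_RDONLY);
 *	fcntl(dfd, F_SETSIG, SIGRTMIN + 1);	- optional, SIGIO is the default
 *	fcntl(dfd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MULTISHOT);
 *
 * The DN_* bits in "arg" are what convert_arg() above maps to FS_* bits.
 */
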
/*
 * If multiple processes watch the same inode with dnotify there is only one
 * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
 * onto that mark.  This function either attaches the new dnotify_struct onto
 * that list, or it |= the mask onto an existing dnotify_struct.
 */
static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
		     fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
	struct dnotify_struct *odn;

	odn = dn_mark->dn;
	while (odn != NULL) {
		/* adding more events to an existing dnotify_struct? */
		if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
			odn->dn_fd = fd;
			odn->dn_mask |= mask;
			return -EEXIST;
		}
		odn = odn->dn_next;
	}

	dn->dn_mask = mask;
	dn->dn_fd = fd;
	dn->dn_filp = filp;
	dn->dn_owner = id;
	dn->dn_next = dn_mark->dn;
	dn_mark->dn = dn;

	return 0;
}

/*
 * When a process calls fcntl to attach a dnotify watch to a directory it ends
 * up here.  Allocate both a mark for fsnotify to add and a dnotify_struct to
 * be attached to the fsnotify_mark.
 */
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
	struct dnotify_mark *new_dn_mark, *dn_mark;
	struct fsnotify_mark *new_fsn_mark, *fsn_mark;
	struct dnotify_struct *dn;
	struct inode *inode;
	fl_owner_t id = current->files;
	struct file *f;
	int destroy = 0, error = 0;
	__u32 mask;

	/* we use these to tell if we need to kfree */
	new_fsn_mark = NULL;
	dn = NULL;

	if (!dir_notify_enable) {
		error = -EINVAL;
		goto out_err;
	}

	/* a 0 mask means we are explicitly removing the watch */
	if ((arg & ~DN_MULTISHOT) == 0) {
		dnotify_flush(filp, id);
		error = 0;
		goto out_err;
	}

	/* dnotify only works on directories */
	inode = file_inode(filp);
	if (!S_ISDIR(inode->i_mode)) {
		error = -ENOTDIR;
		goto out_err;
	}

	/*
	 * convert the userspace DN_* "arg" to the internal FS_*
	 * defined in fsnotify
	 */
	mask = convert_arg(arg);

	error = security_path_notify(&filp->f_path, mask,
				     FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		goto out_err;

	/* expect most fcntl to add new rather than augment old */
	dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
	if (!dn) {
		error = -ENOMEM;
		goto out_err;
	}

	/* new fsnotify mark, we expect most fcntl calls to add a new mark */
	new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
	if (!new_dn_mark) {
		error = -ENOMEM;
		goto out_err;
	}

	/* set up the new_fsn_mark and new_dn_mark */
	new_fsn_mark = &new_dn_mark->fsn_mark;
	fsnotify_init_mark(new_fsn_mark, dnotify_group);
	new_fsn_mark->mask = mask;
	new_dn_mark->dn = NULL;

	/* this is needed to prevent the fcntl/close race described below */
	mutex_lock(&dnotify_group->mark_mutex);

	/* add the new_fsn_mark or find an old one. */
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group);
	if (fsn_mark) {
		dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
		spin_lock(&fsn_mark->lock);
	} else {
		error = fsnotify_add_inode_mark_locked(new_fsn_mark, inode, 0);
		if (error) {
			mutex_unlock(&dnotify_group->mark_mutex);
			goto out_err;
		}
		spin_lock(&new_fsn_mark->lock);
		fsn_mark = new_fsn_mark;
		dn_mark = new_dn_mark;
		/* we used new_fsn_mark, so don't free it */
		new_fsn_mark = NULL;
	}

	rcu_read_lock();
	f = fcheck(fd);
	rcu_read_unlock();

	/* if (f != filp) means that we lost a race and another task/thread
	 * actually closed the fd we are still playing with before we grabbed
	 * the dnotify_group's mark_mutex and fsn_mark->lock.  Since closing
	 * the fd is the only time we clean up the marks we need to get our
	 * mark off the list. */
	if (f != filp) {
		/* if we added ourselves, shoot ourselves, it's possible that
		 * the flush actually did shoot this fsn_mark.  That's fine too
		 * since multiple calls to destroy_mark are perfectly safe; if
		 * we found a dn_mark already attached to the inode, just sod
		 * off silently as the flush at close time dealt with it.
		 */
		if (dn_mark == new_dn_mark)
			destroy = 1;
		error = 0;
		goto out;
	}

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);

	error = attach_dn(dn, dn_mark, id, fd, filp, mask);
	/* !error means that we attached the dn to the dn_mark, so don't free it */
	if (!error)
		dn = NULL;
	/* -EEXIST means that we didn't add this new dn and used an old one.
	 * that isn't an error (and the unused dn should be freed) */
	else if (error == -EEXIST)
		error = 0;

	dnotify_recalc_inode_mask(fsn_mark);
out:
	spin_unlock(&fsn_mark->lock);

	if (destroy)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&dnotify_group->mark_mutex);
	if (destroy)
		fsnotify_free_mark(fsn_mark);
	fsnotify_put_mark(fsn_mark);
out_err:
	if (new_fsn_mark)
		fsnotify_put_mark(new_fsn_mark);
	if (dn)
		kmem_cache_free(dnotify_struct_cache, dn);
	return error;
}

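/*
 * Create the slab caches for dnotify_struct and dnotify_mark and allocate the
 * single fsnotify group that every dnotify watch in the system hangs off.
 */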
static int __init dnotify_init(void)
{
	dnotify_struct_cache = KMEM_CACHE(dnotify_struct,
					  SLAB_PANIC|SLAB_ACCOUNT);
	dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT);

	dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
	if (IS_ERR(dnotify_group))
		panic("unable to allocate fsnotify group for dnotify\n");
	return 0;
}

module_init(dnotify_init)