/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *	Released under GPL v2.
 *	Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <uapi/linux/mount.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

static inline struct mount *last_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}

static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}

static int do_make_slave(struct mount *mnt)
{
	struct mount *master, *slave_mnt;

	if (list_empty(&mnt->mnt_share)) {
		if (IS_MNT_SHARED(mnt)) {
			mnt_release_group_id(mnt);
			CLEAR_MNT_SHARED(mnt);
		}
		master = mnt->mnt_master;
		if (!master) {
			struct list_head *p = &mnt->mnt_slave_list;
			while (!list_empty(p)) {
				slave_mnt = list_first_entry(p,
						struct mount, mnt_slave);
				list_del_init(&slave_mnt->mnt_slave);
				slave_mnt->mnt_master = NULL;
			}
			return 0;
		}
	} else {
		struct mount *m;
		/*
		 * slave 'mnt' to a peer mount that has the
		 * same root dentry. If none is available then
		 * slave it to anything that is available.
		 */
		for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
			if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
				master = m;
				break;
			}
		}
		list_del_init(&mnt->mnt_share);
		mnt->mnt_group_id = 0;
		CLEAR_MNT_SHARED(mnt);
	}
	list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
		slave_mnt->mnt_master = master;
	list_move(&mnt->mnt_slave, &master->mnt_slave_list);
	list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
	INIT_LIST_HEAD(&mnt->mnt_slave_list);
	mnt->mnt_master = master;
	return 0;
}

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}
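/*
 * Illustrative mapping (paraphrasing change_mnt_propagation() above,
 * with the mount(8) spellings added for reference):
 *
 *   MS_SHARED      mount --make-shared      set_mnt_shared(); the mount
 *                                           becomes/stays a peer group member
 *   MS_SLAVE       mount --make-slave       do_make_slave(); leave the peer
 *                                           group, hang off a master via
 *                                           ->mnt_slave
 *   MS_PRIVATE     mount --make-private     as slave, then detach from the
 *                                           master; propagates nothing
 *   MS_UNBINDABLE  mount --make-unbindable  as private, plus MNT_UNBINDABLE
 */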
/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in propagate_one() to be able to find out if
 * a mount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
				      struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

static struct mount *skip_propagation_subtree(struct mount *m,
					      struct mount *origin)
{
	/*
	 * Advance m such that propagation_next will not return
	 * the slaves of m.
	 */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		m = last_slave(m);

	return m;
}

static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

static inline bool peers(struct mount *m1, struct mount *m2)
{
	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}
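/*
 * Example (illustrative): two mounts cloned from the same shared mount
 * share its non-zero ->mnt_group_id, so peers() is true for them.  A
 * private mount has ->mnt_group_id == 0, which is why the trailing
 * non-zero check matters: two unrelated private mounts must not be
 * treated as peers just because both ids happen to be 0.
 */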
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (peers(m, last_dest)) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		bool done;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p))
				break;
		}
		do {
			struct mount *parent = last_source->mnt_parent;
			if (last_source == first_source)
				break;
			done = parent->mnt_master == p;
			if (done && peers(n, parent))
				break;
			last_source = last_source->mnt_master;
		} while (!done);

		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		  struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	first_source = source_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}

static struct mount *find_topper(struct mount *mnt)
{
	/* If there is exactly one mount covering mnt completely return it. */
	struct mount *child;

	if (!list_is_singular(&mnt->mnt_mounts))
		return NULL;

	child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
	if (child->mnt_mountpoint != mnt->mnt.mnt_root)
		return NULL;

	return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}
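/*
 * Example (illustrative): the unmount path expects each candidate mount
 * to be pinned only by the references the unmount itself accounts for
 * (the mount tree's own reference plus the caller's).  Anything extra --
 * an open file, a task's cwd, a clone into another namespace in flight --
 * pushes the count above the expected value, and do_refcount_check()
 * reports the mount as busy.
 */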
/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child, *topper;
	struct mount *parent = mnt->mnt_parent;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		int count = 1;
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (!child)
			continue;

		/* Is there exactly one mount on the child that covers
		 * it completely whose reference should be ignored?
		 */
		topper = find_topper(child);
		if (topper)
			count += 1;
		else if (!list_empty(&child->mnt_mounts))
			continue;

		if (do_refcount_check(child, count))
			return 1;
	}
	return 0;
}

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
	CLEAR_MNT_MARK(mnt);
	mnt->mnt.mnt_flags |= MNT_UMOUNT;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_umounting);
	list_move_tail(&mnt->mnt_list, to_umount);
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static bool __propagate_umount(struct mount *mnt,
			       struct list_head *to_umount,
			       struct list_head *to_restore)
{
	bool progress = false;
	struct mount *child;

	/*
	 * The state of the parent won't change if this mount is
	 * already unmounted or marked as without children.
	 */
	if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
		goto out;

	/* Verify topper is the only grandchild that has not been
	 * speculatively unmounted.
	 */
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (child->mnt_mountpoint == mnt->mnt.mnt_root)
			continue;
		if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
			continue;
		/* Found a mounted child */
		goto children;
	}

	/* Mark mounts that can be unmounted if not locked */
	SET_MNT_MARK(mnt);
	progress = true;

	/* If a mount is without children and not locked umount it. */
	if (!IS_MNT_LOCKED(mnt)) {
		umount_one(mnt, to_umount);
	} else {
children:
		list_move_tail(&mnt->mnt_umounting, to_restore);
	}
out:
	return progress;
}
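/*
 * Illustrative summary: __propagate_umount() above sorts each visited
 * mount into one of two buckets.  Mounts that turn out to be unmountable
 * go to @to_umount via umount_one(); mounts that were reached by
 * propagation but are locked or still carry children go to @to_restore,
 * so restore_mounts() further below can put them back into a sane state.
 */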
static void umount_list(struct list_head *to_umount,
			struct list_head *to_restore)
{
	struct mount *mnt, *child, *tmp;
	list_for_each_entry(mnt, to_umount, mnt_list) {
		list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
			/* topper? */
			if (child->mnt_mountpoint == mnt->mnt.mnt_root)
				list_move_tail(&child->mnt_umounting, to_restore);
			else
				umount_one(child, to_umount);
		}
	}
}

static void restore_mounts(struct list_head *to_restore)
{
	/* Restore mounts to a clean working state */
	while (!list_empty(to_restore)) {
		struct mount *mnt, *parent;
		struct mountpoint *mp;

		mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
		CLEAR_MNT_MARK(mnt);
		list_del_init(&mnt->mnt_umounting);

		/* Should this mount be reparented? */
		mp = mnt->mnt_mp;
		parent = mnt->mnt_parent;
		while (parent->mnt.mnt_flags & MNT_UMOUNT) {
			mp = parent->mnt_mp;
			parent = parent->mnt_parent;
		}
		if (parent != mnt->mnt_parent)
			mnt_change_mountpoint(parent, mp, mnt);
	}
}

static void cleanup_umount_visitations(struct list_head *visited)
{
	while (!list_empty(visited)) {
		struct mount *mnt =
			list_first_entry(visited, struct mount, mnt_umounting);
		list_del_init(&mnt->mnt_umounting);
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;
	LIST_HEAD(to_restore);
	LIST_HEAD(to_umount);
	LIST_HEAD(visited);

	/* Find candidates for unmounting */
	list_for_each_entry_reverse(mnt, list, mnt_list) {
		struct mount *parent = mnt->mnt_parent;
		struct mount *m;

		/*
		 * If this mount has already been visited it is known that its
		 * entire peer group and all of their slaves in the propagation
		 * tree for the mountpoint have already been visited and there
		 * is no need to visit them again.
		 */
		if (!list_empty(&mnt->mnt_umounting))
			continue;

		list_add_tail(&mnt->mnt_umounting, &visited);
		for (m = propagation_next(parent, parent); m;
		     m = propagation_next(m, parent)) {
			struct mount *child = __lookup_mnt(&m->mnt,
							   mnt->mnt_mountpoint);
			if (!child)
				continue;

			if (!list_empty(&child->mnt_umounting)) {
				/*
				 * If the child has already been visited it is
				 * known that its entire peer group and all of
				 * their slaves in the propagation tree for the
				 * mountpoint have already been visited and
				 * there is no need to visit this subtree again.
				 */
				m = skip_propagation_subtree(m, parent);
				continue;
			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
				/*
				 * We have come across a partially unmounted
				 * mount in the list that has not been visited
				 * yet. Remember it has been visited and
				 * continue about our merry way.
				 */
				list_add_tail(&child->mnt_umounting, &visited);
				continue;
			}

			/* Check the child and parents while progress is made */
			while (__propagate_umount(child,
						  &to_umount, &to_restore)) {
				/* Is the parent a umount candidate? */
				child = child->mnt_parent;
				if (list_empty(&child->mnt_umounting))
					break;
			}
		}
	}

	umount_list(&to_umount, &to_restore);
	restore_mounts(&to_restore);
	cleanup_umount_visitations(&visited);
	list_splice_tail(&to_umount, list);

	return 0;
}
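/*
 * Rough caller sketch (assumed from fs/namespace.c, not part of this
 * file): umount_tree() collects the mounts being torn down on a list
 * threaded through ->mnt_list and hands it to propagate_umount(), which
 * grows the list with every mount that receives the unmount event by
 * propagation before the caller dissolves them all.
 */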