/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include "pnode.h"
#include "internal.h"

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
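/*
 * Typical usage, as a sketch only (do_frob() stands in for whatever
 * modification the caller actually performs):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;			// r/o or being frozen
 *	err = do_frob(path->dentry);		// the actual write
 *	mnt_drop_write(path->mnt);		// release the reference
 */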
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
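/*
 * Sketch of the file-based pair (frob_ioctl() and do_frob() are made-up
 * names for illustration, not functions in this file):
 *
 *	static long frob_ioctl(struct file *file)
 *	{
 *		int err = mnt_want_write_file(file);
 *		if (err)
 *			return err;
 *		err = do_frob(file_inode(file));
 *		mnt_drop_write_file(file);
 *		return err;
 *	}
 */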
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
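/*
 * The MNT_WRITE_HOLD handshake used above pairs with __mnt_want_write().
 * A simplified sketch of the two sides (see the real functions for the
 * exact barriers):
 *
 *	writer (fast path)		remount r/o (slow path)
 *	------------------		-----------------------
 *	mnt_inc_writers()		set MNT_WRITE_HOLD
 *	smp_mb()			smp_mb()
 *	spin while MNT_WRITE_HOLD	sum mnt_writers; if zero,
 *	smp_rmb()			    set MNT_READONLY
 *	check mnt_is_readonly()		smp_wmb()
 *	back out with -EROFS if r/o	clear MNT_WRITE_HOLD
 */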
static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct mount *p, *res = NULL;
	p = __lookup_mnt(mnt, dentry);
	if (!p)
		goto out;
	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
		res = p;
	hlist_for_each_entry_continue(p, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
			res = p;
	}
out:
	return res;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
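/*
 * Illustrative caller (sketch): look up what is mounted on a path and
 * drop the reference lookup_mnt() took:
 *
 *	struct vfsmount *child = lookup_mnt(&path);
 *	if (child) {
 *		// path.dentry is a mountpoint; child is the first
 *		// (eldest) mount on it
 *		...
 *		mntput(child);
 *	}
 */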
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace, not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	INIT_HLIST_HEAD(&mp->m_list);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

static void attach_shadowed(struct mount *mnt,
			struct mount *parent,
			struct mount *shadows)
{
	if (shadows) {
		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
		list_add(&mnt->mnt_child, &shadows->mnt_child);
	} else {
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	attach_shadowed(mnt, parent, shadows);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
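/*
 * next_mnt() yields a depth-first walk of a mount tree; the idiom used
 * throughout this file is (sketch):
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt)) {
 *		// visit p; p's children are visited after p
 *	}
 *
 * skip_mnt_tree() returns the last mount in p's subtree, so assigning
 * s = skip_mnt_tree(s) inside such a loop makes next_mnt() step past
 * the whole subtree (see copy_tree() below for a real user).
 */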
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
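/*
 * Illustrative use (sketch; foo_fs_type is a made-up filesystem): a
 * subsystem with its own file_system_type can create an internal mount.
 * kern_mount() in <linux/fs.h> wraps this pattern with MS_KERNMOUNT:
 *
 *	struct vfsmount *m;
 *
 *	m = vfs_kern_mount(&foo_fs_type, MS_KERNMOUNT, "foo", NULL);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	mntput(m);
 */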
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
	}
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
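/*
 * Illustrative pairing (sketch; foo_fill_super() is a made-up name):
 * stash the option string at mount time and let generic_show_options()
 * display it later:
 *
 *	static int foo_fill_super(struct super_block *sb, void *data,
 *				  int silent)
 *	{
 *		save_mount_options(sb, data);
 *		...
 *	}
 *
 * with .show_options = generic_show_options in foo's super_operations.
 */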
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct mount *mnt);
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but the test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	lock_mount_hash();
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	unlock_mount_hash();
	put_mountpoint(mp);
out_unlock:
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}
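/*
 * From userspace this is reached through umount(2)/umount2(2), e.g.
 * (illustrative):
 *
 *	umount("/mnt");			// plain unmount
 *	umount2("/mnt", MNT_DETACH);	// lazy: detach now, clean up later
 *	umount2("/mnt", MNT_EXPIRE);	// -EAGAIN first (sets the mark),
 *					// unmounts on retry if still unused
 */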
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			struct mount *t = NULL;
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			mnt_set_mountpoint(parent, p->mnt_mp, q);
			if (!list_empty(&parent->mnt_mounts)) {
				t = list_last_entry(&parent->mnt_mounts,
					struct mount, mnt_child);
				if (t->mnt_mp != p->mnt_mp)
					t = NULL;
			}
			attach_shadowed(q, parent, t);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new
 * mount will not be attached anywhere in the namespace and will be private
 * (i.e. changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	down_read(&namespace_sem);
	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
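/*
 * Illustrative in-kernel use (sketch): a stacking filesystem can take a
 * private clone of a lower layer so that later changes to the mount
 * table don't propagate into its internal reference:
 *
 *	struct vfsmount *m = clone_private_mount(&lowerpath);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	mntput(m);
 */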
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *	 tree of the destination mount and the cloned mount is added to
 *	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt, NULL);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt_last(&child->mnt_parent->mnt,
				      child->mnt_mountpoint);
		commit_tree(child, q);
	}
	unlock_mount_hash();

	return 0;

out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = lookup_mountpoint(dentry);
		if (!mp)
			mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

out_unlock:
	namespace_unlock();
	return err;
}
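/*
 * do_change_type() backs mount(2) calls that only change propagation,
 * e.g. (illustrative):
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);		// one mount
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);	// whole tree
 */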

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
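
/*
 * Example: the userspace spellings of a loopback (bind) mount handled by
 * do_loopback().  A minimal sketch with placeholder paths:
 *
 *	#include <sys/mount.h>
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);		// mount --bind
 *	mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL);	// mount --rbind
 *
 * The non-recursive form is refused with -EINVAL if /src has locked
 * children (has_locked_children() above), since cloning only the top
 * mount would expose whatever those child mounts cover.
 */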

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		/* Was the nodev implicitly added in mount? */
		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV;
		} else {
			return -EPERM;
		}
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
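
/*
 * Example: the two remount flavours dispatched in do_remount() above.
 * A minimal userspace sketch with placeholder paths:
 *
 *	#include <sys/mount.h>
 *
 *	// superblock remount: goes through do_remount_sb()
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 *	// per-mountpoint read-only toggle: adding MS_BIND routes the
 *	// request to change_mount_flags() instead, leaving the sb alone
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */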

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer expire
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

err:
	mntput(mnt);
	return ERR_PTR(err);
}
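
/*
 * Example: the "type.subtype" convention parsed by fs_set_subtype().
 * A minimal sketch; "sshfs" is just an illustration of a FUSE subtype:
 *
 *	mount("user@host:/", "/mnt", "fuse.sshfs", 0, opts);
 *
 * stores "sshfs" in sb->s_subtype, so the mount is reported as
 * "fuse.sshfs" in /proc/mounts even though the registered filesystem
 * type is plain "fuse".
 */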

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
		if (type->fs_flags & FS_USERNS_VISIBLE) {
			if (!fs_fully_visible(type, &mnt_flags)) {
				put_filesystem(type);
				return -EPERM;
			}
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
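
/*
 * Example: how an expirable automount is wired up.  A minimal sketch of
 * the pattern used by callers such as AFS and NFS; the list name and the
 * periodic worker are placeholders:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	// after vfs_kern_mount() in the fs's ->d_automount handler:
 *	mnt_set_expiry(mnt, &example_automount_list);
 *
 *	// from a periodic worker:
 *	mark_mounts_for_expiry(&example_automount_list);
 *
 * A mount survives the first pass (it only gets its expiry mark set) and
 * is unmounted on the next pass if nothing cleared the mark in between.
 */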

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PAGE_SIZE) : NULL;
}
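
/*
 * Worked example of the contract copy_mount_options() implements, with
 * made-up numbers: if userspace passes a pointer 100 bytes below the end
 * of its last mapped page, the copy faults after 100 bytes, i == 100,
 * and the remaining PAGE_SIZE - 100 bytes of the kernel page are
 * zero-filled rather than failing the mount.  Only when *nothing* could
 * be copied (i == 0) does the caller see -EFAULT.
 */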
2645 * 2646 * data is a (void *) that can point to any structure up to 2647 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 2648 * information (or be NULL). 2649 * 2650 * Pre-0.97 versions of mount() didn't have a flags word. 2651 * When the flags word was introduced its top half was required 2652 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 2653 * Therefore, if this magic number is present, it carries no information 2654 * and must be discarded. 2655 */ 2656 long do_mount(const char *dev_name, const char __user *dir_name, 2657 const char *type_page, unsigned long flags, void *data_page) 2658 { 2659 struct path path; 2660 int retval = 0; 2661 int mnt_flags = 0; 2662 2663 /* Discard magic */ 2664 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 2665 flags &= ~MS_MGC_MSK; 2666 2667 /* Basic sanity checks */ 2668 if (data_page) 2669 ((char *)data_page)[PAGE_SIZE - 1] = 0; 2670 2671 /* ... and get the mountpoint */ 2672 retval = user_path(dir_name, &path); 2673 if (retval) 2674 return retval; 2675 2676 retval = security_sb_mount(dev_name, &path, 2677 type_page, flags, data_page); 2678 if (!retval && !may_mount()) 2679 retval = -EPERM; 2680 if (retval) 2681 goto dput_out; 2682 2683 /* Default to relatime unless overriden */ 2684 if (!(flags & MS_NOATIME)) 2685 mnt_flags |= MNT_RELATIME; 2686 2687 /* Separate the per-mountpoint flags */ 2688 if (flags & MS_NOSUID) 2689 mnt_flags |= MNT_NOSUID; 2690 if (flags & MS_NODEV) 2691 mnt_flags |= MNT_NODEV; 2692 if (flags & MS_NOEXEC) 2693 mnt_flags |= MNT_NOEXEC; 2694 if (flags & MS_NOATIME) 2695 mnt_flags |= MNT_NOATIME; 2696 if (flags & MS_NODIRATIME) 2697 mnt_flags |= MNT_NODIRATIME; 2698 if (flags & MS_STRICTATIME) 2699 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 2700 if (flags & MS_RDONLY) 2701 mnt_flags |= MNT_READONLY; 2702 2703 /* The default atime for remount is preservation */ 2704 if ((flags & MS_REMOUNT) && 2705 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | 2706 MS_STRICTATIME)) == 0)) { 2707 mnt_flags &= ~MNT_ATIME_MASK; 2708 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; 2709 } 2710 2711 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | 2712 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | 2713 MS_STRICTATIME); 2714 2715 if (flags & MS_REMOUNT) 2716 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, 2717 data_page); 2718 else if (flags & MS_BIND) 2719 retval = do_loopback(&path, dev_name, flags & MS_REC); 2720 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2721 retval = do_change_type(&path, flags); 2722 else if (flags & MS_MOVE) 2723 retval = do_move_mount(&path, dev_name); 2724 else 2725 retval = do_new_mount(&path, type_page, flags, mnt_flags, 2726 dev_name, data_page); 2727 dput_out: 2728 path_put(&path); 2729 return retval; 2730 } 2731 2732 static void free_mnt_ns(struct mnt_namespace *ns) 2733 { 2734 ns_free_inum(&ns->ns); 2735 put_user_ns(ns->user_ns); 2736 kfree(ns); 2737 } 2738 2739 /* 2740 * Assign a sequence number so we can detect when we attempt to bind 2741 * mount a reference to an older mount namespace into the current 2742 * mount namespace, preventing reference counting loops. A 64bit 2743 * number incrementing at 10Ghz will take 12,427 years to wrap which 2744 * is effectively never, so we can ignore the possibility. 

/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  A 64bit
 * number incrementing at 10GHz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	int ret;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	ret = ns_alloc_inum(&new_ns->ns);
	if (ret) {
		kfree(new_ns);
		return ERR_PTR(ret);
	}
	new_ns->ns.ops = &mntns_operations;
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
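	/*
	 * The walk below advances two cursors in lockstep: p over the old
	 * tree, q over the copy.  copy_tree() can legitimately skip some
	 * subtrees (e.g. bind mounts of mount-namespace files when
	 * CL_COPY_MNT_NS_FILE isn't set), so p is stepped forward until
	 * its root dentry matches q's again before the next comparison.
	 */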
2813 */ 2814 p = old; 2815 q = new; 2816 while (p) { 2817 q->mnt_ns = new_ns; 2818 if (new_fs) { 2819 if (&p->mnt == new_fs->root.mnt) { 2820 new_fs->root.mnt = mntget(&q->mnt); 2821 rootmnt = &p->mnt; 2822 } 2823 if (&p->mnt == new_fs->pwd.mnt) { 2824 new_fs->pwd.mnt = mntget(&q->mnt); 2825 pwdmnt = &p->mnt; 2826 } 2827 } 2828 p = next_mnt(p, old); 2829 q = next_mnt(q, new); 2830 if (!q) 2831 break; 2832 while (p->mnt.mnt_root != q->mnt.mnt_root) 2833 p = next_mnt(p, old); 2834 } 2835 namespace_unlock(); 2836 2837 if (rootmnt) 2838 mntput(rootmnt); 2839 if (pwdmnt) 2840 mntput(pwdmnt); 2841 2842 return new_ns; 2843 } 2844 2845 /** 2846 * create_mnt_ns - creates a private namespace and adds a root filesystem 2847 * @mnt: pointer to the new root filesystem mountpoint 2848 */ 2849 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) 2850 { 2851 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); 2852 if (!IS_ERR(new_ns)) { 2853 struct mount *mnt = real_mount(m); 2854 mnt->mnt_ns = new_ns; 2855 new_ns->root = mnt; 2856 list_add(&mnt->mnt_list, &new_ns->list); 2857 } else { 2858 mntput(m); 2859 } 2860 return new_ns; 2861 } 2862 2863 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name) 2864 { 2865 struct mnt_namespace *ns; 2866 struct super_block *s; 2867 struct path path; 2868 int err; 2869 2870 ns = create_mnt_ns(mnt); 2871 if (IS_ERR(ns)) 2872 return ERR_CAST(ns); 2873 2874 err = vfs_path_lookup(mnt->mnt_root, mnt, 2875 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 2876 2877 put_mnt_ns(ns); 2878 2879 if (err) 2880 return ERR_PTR(err); 2881 2882 /* trade a vfsmount reference for active sb one */ 2883 s = path.mnt->mnt_sb; 2884 atomic_inc(&s->s_active); 2885 mntput(path.mnt); 2886 /* lock the sucker */ 2887 down_write(&s->s_umount); 2888 /* ... 

struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	unsigned long data_page;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root.  The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root.  No other
 * file system may be mounted on put_old.  After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point.  It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
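 *
 * Example:
 *  a minimal userspace sketch of the usual sequence (assuming /new_root
 *  is already a mountpoint and /new_root/put_old exists):
 *
 *	chdir("/new_root");
 *	pivot_root(".", "put_old");
 *	chroot(".");
 *	umount2("/put_old", MNT_DETACH);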
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				0,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				0,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	for (u = 0; u <= m_hash_mask; u++)
		INIT_HLIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u <= mp_hash_mask; u++)
		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
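
/*
 * Example: the long-term internal mount pattern served by
 * kern_mount_data()/kern_unmount().  A minimal sketch, assuming a
 * registered file_system_type named example_fs_type:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	example_mnt = kern_mount(&example_fs_type); // kern_mount_data(..., NULL)
 *	if (IS_ERR(example_mnt))
 *		return PTR_ERR(example_mnt);
 *	...
 *	kern_unmount(example_mnt);	// before unregister_filesystem()
 *
 * pipefs and sockfs are set up this way at boot.
 */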

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root? */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
		    !(new_flags & MNT_NODEV))
			continue;
		if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \
							MNT_LOCK_NODEV | \
							MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
};
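
/*
 * Example: mntns_install() above is what runs when a process joins an
 * existing mount namespace via setns(2).  A minimal userspace sketch,
 * assuming sufficient privilege over the target namespace:
 *
 *	#include <fcntl.h>
 *	#include <sched.h>
 *
 *	int fd = open("/proc/1/ns/mnt", O_RDONLY);
 *	setns(fd, CLONE_NEWNS);		// or 0 to accept any ns type
 *	close(fd);
 */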