/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h>		/* acct_auto_close_mnt */
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct list_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}
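
/*
 * Example (illustrative): on a 64-bit machine with 4 KiB pages a
 * struct list_head is 16 bytes, so HASH_SHIFT is ilog2(4096 / 16) = 8
 * and the tables below get 256 buckets; the (tmp >> HASH_SHIFT) fold
 * mixes the high-order bits of the two pointers into the bucket index.
 */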

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount we want to write to
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);

	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount we want to write to
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
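
/*
 * Example (illustrative): the usual caller pattern for the pair above,
 * with do_something_that_writes() standing in for whatever actually
 * modifies the filesystem (it is a placeholder, not a real helper):
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = do_something_that_writes(path);
 *	mnt_drop_write(path->mnt);
 *	return err;
 */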

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return false;
	}
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct mount *p;

	list_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct mount *p;

	list_for_each_entry_reverse(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
	struct mountpoint *mp;
	int ret;

	list_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	list_add(&mp->m_hash, chain);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		list_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
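
/*
 * Example (illustrative): kernel-internal users normally reach this via
 * kern_mount(&some_fs_type) (where some_fs_type is a placeholder), which
 * boils down to vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
 * the MS_KERNMOUNT flag is what marks the resulting mount MNT_INTERNAL
 * above.
 */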

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
	/* Don't allow unprivileged users to change mount flags */
	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void delayed_free(struct rcu_head *head)
{
	struct mount *mnt = container_of(head, struct mount, mnt_rcu);
	kfree(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}
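
/*
 * Note (illustrative): mntput_no_expire() below frees the mount through
 * call_rcu(&mnt->mnt_rcu, delayed_free) rather than freeing it
 * synchronously, so a lockless walker that found the mount under
 * rcu_read_lock() (e.g. via __lookup_mnt()) may keep dereferencing it
 * until a grace period has elapsed.
 */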

static void mntput_no_expire(struct mount *mnt)
{
put_again:
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		rcu_read_unlock();
		unlock_mount_hash();
		acct_auto_close_mnt(&mnt->mnt);
		goto put_again;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);
	unlock_mount_hash();

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

void mnt_pin(struct vfsmount *mnt)
{
	lock_mount_hash();
	real_mount(mnt)->mnt_pinned++;
	unlock_mount_hash();
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	lock_mount_hash();
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	unlock_mount_hash();
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
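
/*
 * Example (illustrative, names are made up): a simple filesystem would
 * wire the helpers above up roughly as
 *
 *	static const struct super_operations foofs_sops = {
 *		.show_options	= generic_show_options,
 *	};
 *
 * and call save_mount_options(sb, data) from its fill_super(), so that
 * /proc/mounts reports the option string the user originally passed.
 */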

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = proc_mounts(m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static LIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct mount *mnt;
	LIST_HEAD(head);

	if (likely(list_empty(&unmounted))) {
		up_write(&namespace_sem);
		return;
	}

	list_splice_init(&unmounted, &head);
	up_write(&namespace_sem);

	synchronize_rcu();

	while (!list_empty(&head)) {
		mnt = list_first_entry(&head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_ex_mountpoint.mnt)
			path_put(&mnt->mnt_ex_mountpoint);
		mntput(&mnt->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 * how = 0 => just this tree, don't propagate
 * how = 1 => propagate; we know that nobody else has reference to any victims
 * how = 2 => lazy umount
 */
void umount_tree(struct mount *mnt, int how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (how)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		if (how < 2)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			put_mountpoint(p->mnt_mp);
			/* move the reference to mountpoint into ->mnt_ex_mountpoint */
			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
			p->mnt_mountpoint = p->mnt.mnt_root;
			p->mnt_parent = p;
			p->mnt_mp = NULL;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, &unmounted);
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace', the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 2);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, 1);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
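
/*
 * Example (illustrative): umount2("/mnt", MNT_DETACH) from userspace
 * arrives in do_umount() above with MNT_DETACH set, so the tree is
 * detached with umount_tree(mnt, 2) (lazy umount) and only freed once
 * the last reference to it is dropped.
 */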

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	struct inode *inode = dentry->d_inode;
	struct proc_ns *ei;

	if (!proc_ns_inode(inode))
		return false;

	ei = get_proc_ns(inode);
	if (ei->ns_ops != &mntns_operations)
		return false;

	return true;
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = get_proc_ns(dentry->d_inode)->ns;
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt.mnt_flags &= ~MNT_LOCKED;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, 0);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	tree = copy_tree(real_mount(path->mnt), path->dentry,
			 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
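
/*
 * Example (illustrative): the audit subsystem is a typical user of the
 * three helpers above: collect_mounts() takes a private (CL_PRIVATE)
 * snapshot of the tree at a path, iterate_mounts() runs a callback over
 * the copy, and drop_collected_mounts() discards it; since the copy is
 * private it never appears in any mount namespace.
 */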

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct mount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	lock_mount_hash();

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}

static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}

static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
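
/*
 * Example (illustrative): "mount --make-rshared /mnt" issues
 * mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL); stripping MS_REC
 * leaves MS_SHARED, a power of two, so the type is accepted. A request
 * with both MS_SHARED and MS_SLAVE set is rejected above because two
 * propagation bits survive the masking.
 */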

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent) || !check_mnt(old))
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, 0);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
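
/*
 * Example (illustrative): from userspace this path is reached via
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *
 * ("mount --bind /src /dst"); adding MS_REC makes recurse non-zero, so
 * the whole tree under /src is duplicated with copy_tree() instead of a
 * single clone_mnt().
 */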

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (mnt->mnt_flags & MNT_LOCK_READONLY)
		return -EPERM;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
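
/*
 * Example (illustrative): mount(NULL, "/mnt", NULL,
 * MS_REMOUNT | MS_RDONLY, NULL) remounts the superblock read-only via
 * do_remount_sb(), while MS_REMOUNT | MS_BIND | MS_RDONLY takes the
 * change_mount_flags() branch above and makes only this mountpoint
 * read-only.
 */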

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
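
/*
 * Example (illustrative): for a mount requested with type "fuse.sshfs",
 * fs_set_subtype() finds the '.', stores "sshfs" in sb->s_subtype, and
 * the mount is later reported with that subtype; a trailing dot, as in
 * "fuse.", is rejected with -EINVAL.
 */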

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV;
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1);
	}
	unlock_mount_hash();
	namespace_unlock();
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
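
/*
 * Example (illustrative): an automounting filesystem typically calls
 * mnt_set_expiry(newmnt, &its_expiry_list) when it creates a submount
 * and arranges for mark_mounts_for_expiry(&its_expiry_list) to run
 * periodically. A submount survives the first pass (which only sets the
 * mark) and is unmounted on the next pass if nothing cleared the mark
 * by using the mount in between.
 */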
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
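/*
 * Editor's note: an illustrative sketch, not part of this file. The
 * contract copy_mount_options() relies on: exact_copy_from_user() returns
 * exactly how many of the requested bytes were NOT copied, and zero-fills
 * the destination tail on a mid-buffer fault, so a caller can tell a
 * completely unreadable pointer from a buffer that merely runs into an
 * unmapped page. 'ubuf' is a hypothetical userspace pointer.
 */
#if 0
static int example_read_user_blob(const void __user *ubuf)
{
	char buf[64];
	long uncopied = exact_copy_from_user(buf, ubuf, sizeof(buf));

	if (uncopied == sizeof(buf))
		return -EFAULT;		/* nothing at all was readable */

	/* sizeof(buf) - uncopied leading bytes are valid; on a mid-buffer
	 * fault the remaining bytes were zero-filled before returning. */
	return 0;
}
#endif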
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}
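/*
 * Editor's note: illustrative only, mirroring the pattern sys_mount()
 * uses below. These two helpers hand the mount path kernel-side copies
 * of its userspace arguments: strings are duplicated with a PAGE_SIZE
 * cap (a NULL pointer maps to a NULL result), and the options blob
 * always arrives as one full, zero-padded page, so filesystem option
 * parsers can scan up to PAGE_SIZE bytes without touching userspace
 * again. The function name is hypothetical.
 */
#if 0
static int example_fetch_args(const char __user *dev_name,
			      const void __user *data)
{
	char *kernel_dev;
	unsigned long data_page;
	int err;

	err = copy_mount_string(dev_name, &kernel_dev);
	if (err)
		return err;

	err = copy_mount_options(data, &data_page);
	if (err)
		goto out_dev;

	/* use kernel_dev / (void *)data_page the way do_mount() does */

	free_page(data_page);		/* no-op if data was NULL (page 0) */
out_dev:
	kfree(kernel_dev);
	return err;
}
#endif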
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (i.e.: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}

static void free_mnt_ns(struct mnt_namespace *ns)
{
	proc_free_inum(ns->proc_inum);
	put_user_ns(ns->user_ns);
	kfree(ns);
}
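/*
 * Editor's note: illustrative userspace calls (not part of this file)
 * showing how the flag tests in do_mount() above pick an operation.
 * The paths are made up and the calls are independent examples, not a
 * working script.
 */
#if 0
static void example_mount_dispatch(void)
{
	/* fresh mount of a filesystem		-> do_new_mount()    */
	mount("proc", "/mnt/proc", "proc", 0, NULL);

	/* bind mount (type/data ignored)	-> do_loopback()     */
	mount("/mnt/a", "/mnt/b", NULL, MS_BIND, NULL);

	/* change propagation type		-> do_change_type()  */
	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);

	/* move an attached mount		-> do_move_mount()   */
	mount("/mnt/b", "/mnt/c", NULL, MS_MOVE, NULL);

	/* change flags on an existing mount	-> do_remount()      */
	mount(NULL, "/mnt/proc", NULL, MS_REMOUNT | MS_RDONLY, NULL);
}
#endif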
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops. Wrapping of
 * the 64-bit counter is not a practical concern: even incremented a
 * million times per second it would take over half a million years
 * to wrap, and namespace creation is far rarer than that.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	int ret;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	ret = proc_alloc_inum(&new_ns->proc_inum);
	if (ret) {
		kfree(new_ns);
		return ERR_PTR(ret);
	}
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	return new_ns;
}
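/*
 * Editor's note: a minimal sketch (not part of this file) of the kind of
 * ordering check the sequence number enables. When userspace tries to
 * bind mount a /proc/<pid>/ns/mnt file, the mount can be refused unless
 * the referenced namespace is strictly newer than the mounter's, which
 * is what breaks reference counting loops. The helper name here is
 * hypothetical; the real check sits in the bind-mount path of this file.
 */
#if 0
static bool example_ns_would_loop(struct mnt_namespace *target)
{
	/* older or same vintage -> could create a loop, refuse */
	return current->nsproxy->mnt_ns->seq >= target->seq;
}
#endif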
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark the new
	 * vfsmounts as belonging to the new namespace. We have already
	 * acquired a private fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
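/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * copy_mnt_ns() is what gives clone(2)/unshare(2) with CLONE_NEWNS its
 * semantics: the caller gets a private copy of the mount tree, with its
 * root and cwd transparently switched to the copies, so later mount
 * activity stays invisible to the original namespace (subject to the
 * propagation settings on the copied mounts).
 */
#if 0
static int example_private_tree(void)
{
	if (unshare(CLONE_NEWNS) == -1)		/* -> copy_mnt_ns() */
		return -1;

	/* keep changes private even if "/" was recursively shared */
	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);

	/* this tmpfs is now visible only inside the new namespace */
	return mount("none", "/tmp", "tmpfs", 0, NULL);
}
#endif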
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	struct filename *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
		       (void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
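/*
 * Editor's note: a worked example, not part of this file.
 * is_path_reachable() climbs from 'mnt' up through mountpoints until it
 * either reaches root->mnt (and then defers to is_subdir() on the
 * dentries) or runs out of parents. With a devtmpfs mounted on /dev of
 * the root filesystem:
 *
 *	/dev/shm  under /     -> true  (climb the /dev mount, then is_subdir)
 *	/dev/shm  under /dev  -> true  (same mount, straight to is_subdir)
 *	/         under /dev  -> false (the walk never reaches the /dev mount)
 *
 * path_is_under() is simply the locked wrapper around the same walk.
 */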
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
	    IS_MNT_SHARED(new_mnt->mnt_parent) ||
	    IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
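/*
 * Editor's note: the classic userspace sequence (per pivot_root(2); not
 * part of this file) for switching from an initial root to a real one.
 * It assumes /new_root/put_old already exists; the self bind mount makes
 * /new_root a mountpoint, as the comment above suggests, and syscall(2)
 * is used because glibc provides no pivot_root wrapper.
 */
#if 0
static int example_switch_root(void)
{
	if (mount("/new_root", "/new_root", NULL, MS_BIND, NULL) == -1)
		return -1;			/* ensure it's a mountpoint */
	if (chdir("/new_root") == -1)
		return -1;
	if (syscall(SYS_pivot_root, ".", "./put_old") == -1)
		return -1;
	if (chroot(".") == -1)			/* fix up our own "/" */
		return -1;
	return umount2("/put_old", MNT_DETACH);	/* drop the old root */
}
#endif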
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
	mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * This is a long-term mount; don't release it until the
		 * filesystem is about to be unregistered and we unmount it.
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
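/*
 * Editor's note: an illustrative sketch (not part of this file) of the
 * usual kern_mount_data()/kern_unmount() pairing: an internal filesystem
 * pins one long-term mount at init time and drops it at exit. The
 * example_fs_type name is hypothetical; internal filesystems such as
 * pipefs follow this pattern via the kern_mount() wrapper.
 */
#if 0
static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount_data(&example_fs_type, NULL);
	return PTR_ERR_OR_ZERO(example_mnt);
}

static void __exit example_exit(void)
{
	kern_unmount(example_mnt);	/* safe on ERR_PTR/NULL too */
}
#endif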
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

bool fs_fully_visible(struct file_system_type *type)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if there are any child
		 * mounts that cover anything except for empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			if (!S_ISDIR(inode->i_mode))
				goto next;
			if (inode->i_nlink > 2)
				goto next;
		}
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

static void *mntns_get(struct task_struct *task)
{
	struct mnt_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy) {
		ns = nsproxy->mnt_ns;
		get_mnt_ns(ns);
	}
	rcu_read_unlock();

	return ns;
}

static void mntns_put(void *ns)
{
	put_mnt_ns(ns);
}

static int mntns_install(struct nsproxy *nsproxy, void *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = ns;
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static unsigned int mntns_inum(void *ns)
{
	struct mnt_namespace *mnt_ns = ns;
	return mnt_ns->proc_inum;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.inum		= mntns_inum,
};
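/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * mntns_operations wires mount namespaces into the common ns-file
 * machinery, so entering another task's mount namespace amounts to
 * opening its /proc/<pid>/ns/mnt file and handing the fd to setns(2);
 * the kernel side lands in mntns_install() above, which also re-homes
 * the caller's root and cwd.
 */
#if 0
static int example_enter_mntns(pid_t pid)
{
	char p[64];
	int fd, err;

	snprintf(p, sizeof(p), "/proc/%d/ns/mnt", (int)pid);
	fd = open(p, O_RDONLY);
	if (fd < 0)
		return -1;
	/* needs CAP_SYS_ADMIN (in the target userns) + CAP_SYS_CHROOT */
	err = setns(fd, CLONE_NEWNS);
	close(fd);
	return err;
}
#endif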