1 /* 2 * linux/fs/namespace.c 3 * 4 * (C) Copyright Al Viro 2000, 2001 5 * Released under GPL v2. 6 * 7 * Based on code from fs/super.c, copyright Linus Torvalds and others. 8 * Heavily rewritten. 9 */ 10 11 #include <linux/syscalls.h> 12 #include <linux/slab.h> 13 #include <linux/sched.h> 14 #include <linux/smp_lock.h> 15 #include <linux/init.h> 16 #include <linux/kernel.h> 17 #include <linux/acct.h> 18 #include <linux/capability.h> 19 #include <linux/cpumask.h> 20 #include <linux/module.h> 21 #include <linux/sysfs.h> 22 #include <linux/seq_file.h> 23 #include <linux/mnt_namespace.h> 24 #include <linux/namei.h> 25 #include <linux/security.h> 26 #include <linux/mount.h> 27 #include <linux/ramfs.h> 28 #include <linux/log2.h> 29 #include <linux/idr.h> 30 #include <linux/fs_struct.h> 31 #include <asm/uaccess.h> 32 #include <asm/unistd.h> 33 #include "pnode.h" 34 #include "internal.h" 35 36 #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) 37 #define HASH_SIZE (1UL << HASH_SHIFT) 38 39 /* spinlock for vfsmount related operations, inplace of dcache_lock */ 40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); 41 42 static int event; 43 static DEFINE_IDA(mnt_id_ida); 44 static DEFINE_IDA(mnt_group_ida); 45 46 static struct list_head *mount_hashtable __read_mostly; 47 static struct kmem_cache *mnt_cache __read_mostly; 48 static struct rw_semaphore namespace_sem; 49 50 /* /sys/fs */ 51 struct kobject *fs_kobj; 52 EXPORT_SYMBOL_GPL(fs_kobj); 53 54 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) 55 { 56 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); 57 tmp += ((unsigned long)dentry / L1_CACHE_BYTES); 58 tmp = tmp + (tmp >> HASH_SHIFT); 59 return tmp & (HASH_SIZE - 1); 60 } 61 62 #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) 63 64 /* allocation is serialized by namespace_sem */ 65 static int mnt_alloc_id(struct vfsmount *mnt) 66 { 67 int res; 68 69 retry: 70 ida_pre_get(&mnt_id_ida, GFP_KERNEL); 71 spin_lock(&vfsmount_lock); 72 res = ida_get_new(&mnt_id_ida, &mnt->mnt_id); 73 spin_unlock(&vfsmount_lock); 74 if (res == -EAGAIN) 75 goto retry; 76 77 return res; 78 } 79 80 static void mnt_free_id(struct vfsmount *mnt) 81 { 82 spin_lock(&vfsmount_lock); 83 ida_remove(&mnt_id_ida, mnt->mnt_id); 84 spin_unlock(&vfsmount_lock); 85 } 86 87 /* 88 * Allocate a new peer group ID 89 * 90 * mnt_group_ida is protected by namespace_sem 91 */ 92 static int mnt_alloc_group_id(struct vfsmount *mnt) 93 { 94 if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) 95 return -ENOMEM; 96 97 return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id); 98 } 99 100 /* 101 * Release a peer group ID 102 */ 103 void mnt_release_group_id(struct vfsmount *mnt) 104 { 105 ida_remove(&mnt_group_ida, mnt->mnt_group_id); 106 mnt->mnt_group_id = 0; 107 } 108 109 struct vfsmount *alloc_vfsmnt(const char *name) 110 { 111 struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); 112 if (mnt) { 113 int err; 114 115 err = mnt_alloc_id(mnt); 116 if (err) 117 goto out_free_cache; 118 119 if (name) { 120 mnt->mnt_devname = kstrdup(name, GFP_KERNEL); 121 if (!mnt->mnt_devname) 122 goto out_free_id; 123 } 124 125 atomic_set(&mnt->mnt_count, 1); 126 INIT_LIST_HEAD(&mnt->mnt_hash); 127 INIT_LIST_HEAD(&mnt->mnt_child); 128 INIT_LIST_HEAD(&mnt->mnt_mounts); 129 INIT_LIST_HEAD(&mnt->mnt_list); 130 INIT_LIST_HEAD(&mnt->mnt_expire); 131 INIT_LIST_HEAD(&mnt->mnt_share); 132 INIT_LIST_HEAD(&mnt->mnt_slave_list); 133 INIT_LIST_HEAD(&mnt->mnt_slave); 134 atomic_set(&mnt->__mnt_writers, 0); 135 } 136 return mnt; 137 138 out_free_id: 139 mnt_free_id(mnt); 140 out_free_cache: 141 kmem_cache_free(mnt_cache, mnt); 142 return NULL; 143 } 144 145 /* 146 * Most r/o checks on a fs are for operations that take 147 * discrete amounts of time, like a write() or unlink(). 148 * We must keep track of when those operations start 149 * (for permission checks) and when they end, so that 150 * we can determine when writes are able to occur to 151 * a filesystem. 152 */ 153 /* 154 * __mnt_is_readonly: check whether a mount is read-only 155 * @mnt: the mount to check for its write status 156 * 157 * This shouldn't be used directly ouside of the VFS. 158 * It does not guarantee that the filesystem will stay 159 * r/w, just that it is right *now*. This can not and 160 * should not be used in place of IS_RDONLY(inode). 161 * mnt_want/drop_write() will _keep_ the filesystem 162 * r/w. 163 */ 164 int __mnt_is_readonly(struct vfsmount *mnt) 165 { 166 if (mnt->mnt_flags & MNT_READONLY) 167 return 1; 168 if (mnt->mnt_sb->s_flags & MS_RDONLY) 169 return 1; 170 return 0; 171 } 172 EXPORT_SYMBOL_GPL(__mnt_is_readonly); 173 174 struct mnt_writer { 175 /* 176 * If holding multiple instances of this lock, they 177 * must be ordered by cpu number. 178 */ 179 spinlock_t lock; 180 struct lock_class_key lock_class; /* compiles out with !lockdep */ 181 unsigned long count; 182 struct vfsmount *mnt; 183 } ____cacheline_aligned_in_smp; 184 static DEFINE_PER_CPU(struct mnt_writer, mnt_writers); 185 186 static int __init init_mnt_writers(void) 187 { 188 int cpu; 189 for_each_possible_cpu(cpu) { 190 struct mnt_writer *writer = &per_cpu(mnt_writers, cpu); 191 spin_lock_init(&writer->lock); 192 lockdep_set_class(&writer->lock, &writer->lock_class); 193 writer->count = 0; 194 } 195 return 0; 196 } 197 fs_initcall(init_mnt_writers); 198 199 static void unlock_mnt_writers(void) 200 { 201 int cpu; 202 struct mnt_writer *cpu_writer; 203 204 for_each_possible_cpu(cpu) { 205 cpu_writer = &per_cpu(mnt_writers, cpu); 206 spin_unlock(&cpu_writer->lock); 207 } 208 } 209 210 static inline void __clear_mnt_count(struct mnt_writer *cpu_writer) 211 { 212 if (!cpu_writer->mnt) 213 return; 214 /* 215 * This is in case anyone ever leaves an invalid, 216 * old ->mnt and a count of 0. 217 */ 218 if (!cpu_writer->count) 219 return; 220 atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers); 221 cpu_writer->count = 0; 222 } 223 /* 224 * must hold cpu_writer->lock 225 */ 226 static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer, 227 struct vfsmount *mnt) 228 { 229 if (cpu_writer->mnt == mnt) 230 return; 231 __clear_mnt_count(cpu_writer); 232 cpu_writer->mnt = mnt; 233 } 234 235 /* 236 * Most r/o checks on a fs are for operations that take 237 * discrete amounts of time, like a write() or unlink(). 238 * We must keep track of when those operations start 239 * (for permission checks) and when they end, so that 240 * we can determine when writes are able to occur to 241 * a filesystem. 242 */ 243 /** 244 * mnt_want_write - get write access to a mount 245 * @mnt: the mount on which to take a write 246 * 247 * This tells the low-level filesystem that a write is 248 * about to be performed to it, and makes sure that 249 * writes are allowed before returning success. When 250 * the write operation is finished, mnt_drop_write() 251 * must be called. This is effectively a refcount. 252 */ 253 int mnt_want_write(struct vfsmount *mnt) 254 { 255 int ret = 0; 256 struct mnt_writer *cpu_writer; 257 258 cpu_writer = &get_cpu_var(mnt_writers); 259 spin_lock(&cpu_writer->lock); 260 if (__mnt_is_readonly(mnt)) { 261 ret = -EROFS; 262 goto out; 263 } 264 use_cpu_writer_for_mount(cpu_writer, mnt); 265 cpu_writer->count++; 266 out: 267 spin_unlock(&cpu_writer->lock); 268 put_cpu_var(mnt_writers); 269 return ret; 270 } 271 EXPORT_SYMBOL_GPL(mnt_want_write); 272 273 static void lock_mnt_writers(void) 274 { 275 int cpu; 276 struct mnt_writer *cpu_writer; 277 278 for_each_possible_cpu(cpu) { 279 cpu_writer = &per_cpu(mnt_writers, cpu); 280 spin_lock(&cpu_writer->lock); 281 __clear_mnt_count(cpu_writer); 282 cpu_writer->mnt = NULL; 283 } 284 } 285 286 /* 287 * These per-cpu write counts are not guaranteed to have 288 * matched increments and decrements on any given cpu. 289 * A file open()ed for write on one cpu and close()d on 290 * another cpu will imbalance this count. Make sure it 291 * does not get too far out of whack. 292 */ 293 static void handle_write_count_underflow(struct vfsmount *mnt) 294 { 295 if (atomic_read(&mnt->__mnt_writers) >= 296 MNT_WRITER_UNDERFLOW_LIMIT) 297 return; 298 /* 299 * It isn't necessary to hold all of the locks 300 * at the same time, but doing it this way makes 301 * us share a lot more code. 302 */ 303 lock_mnt_writers(); 304 /* 305 * vfsmount_lock is for mnt_flags. 306 */ 307 spin_lock(&vfsmount_lock); 308 /* 309 * If coalescing the per-cpu writer counts did not 310 * get us back to a positive writer count, we have 311 * a bug. 312 */ 313 if ((atomic_read(&mnt->__mnt_writers) < 0) && 314 !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) { 315 WARN(1, KERN_DEBUG "leak detected on mount(%p) writers " 316 "count: %d\n", 317 mnt, atomic_read(&mnt->__mnt_writers)); 318 /* use the flag to keep the dmesg spam down */ 319 mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT; 320 } 321 spin_unlock(&vfsmount_lock); 322 unlock_mnt_writers(); 323 } 324 325 /** 326 * mnt_drop_write - give up write access to a mount 327 * @mnt: the mount on which to give up write access 328 * 329 * Tells the low-level filesystem that we are done 330 * performing writes to it. Must be matched with 331 * mnt_want_write() call above. 332 */ 333 void mnt_drop_write(struct vfsmount *mnt) 334 { 335 int must_check_underflow = 0; 336 struct mnt_writer *cpu_writer; 337 338 cpu_writer = &get_cpu_var(mnt_writers); 339 spin_lock(&cpu_writer->lock); 340 341 use_cpu_writer_for_mount(cpu_writer, mnt); 342 if (cpu_writer->count > 0) { 343 cpu_writer->count--; 344 } else { 345 must_check_underflow = 1; 346 atomic_dec(&mnt->__mnt_writers); 347 } 348 349 spin_unlock(&cpu_writer->lock); 350 /* 351 * Logically, we could call this each time, 352 * but the __mnt_writers cacheline tends to 353 * be cold, and makes this expensive. 354 */ 355 if (must_check_underflow) 356 handle_write_count_underflow(mnt); 357 /* 358 * This could be done right after the spinlock 359 * is taken because the spinlock keeps us on 360 * the cpu, and disables preemption. However, 361 * putting it here bounds the amount that 362 * __mnt_writers can underflow. Without it, 363 * we could theoretically wrap __mnt_writers. 364 */ 365 put_cpu_var(mnt_writers); 366 } 367 EXPORT_SYMBOL_GPL(mnt_drop_write); 368 369 static int mnt_make_readonly(struct vfsmount *mnt) 370 { 371 int ret = 0; 372 373 lock_mnt_writers(); 374 /* 375 * With all the locks held, this value is stable 376 */ 377 if (atomic_read(&mnt->__mnt_writers) > 0) { 378 ret = -EBUSY; 379 goto out; 380 } 381 /* 382 * nobody can do a successful mnt_want_write() with all 383 * of the counts in MNT_DENIED_WRITE and the locks held. 384 */ 385 spin_lock(&vfsmount_lock); 386 if (!ret) 387 mnt->mnt_flags |= MNT_READONLY; 388 spin_unlock(&vfsmount_lock); 389 out: 390 unlock_mnt_writers(); 391 return ret; 392 } 393 394 static void __mnt_unmake_readonly(struct vfsmount *mnt) 395 { 396 spin_lock(&vfsmount_lock); 397 mnt->mnt_flags &= ~MNT_READONLY; 398 spin_unlock(&vfsmount_lock); 399 } 400 401 void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) 402 { 403 mnt->mnt_sb = sb; 404 mnt->mnt_root = dget(sb->s_root); 405 } 406 407 EXPORT_SYMBOL(simple_set_mnt); 408 409 void free_vfsmnt(struct vfsmount *mnt) 410 { 411 kfree(mnt->mnt_devname); 412 mnt_free_id(mnt); 413 kmem_cache_free(mnt_cache, mnt); 414 } 415 416 /* 417 * find the first or last mount at @dentry on vfsmount @mnt depending on 418 * @dir. If @dir is set return the first mount else return the last mount. 419 */ 420 struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, 421 int dir) 422 { 423 struct list_head *head = mount_hashtable + hash(mnt, dentry); 424 struct list_head *tmp = head; 425 struct vfsmount *p, *found = NULL; 426 427 for (;;) { 428 tmp = dir ? tmp->next : tmp->prev; 429 p = NULL; 430 if (tmp == head) 431 break; 432 p = list_entry(tmp, struct vfsmount, mnt_hash); 433 if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) { 434 found = p; 435 break; 436 } 437 } 438 return found; 439 } 440 441 /* 442 * lookup_mnt increments the ref count before returning 443 * the vfsmount struct. 444 */ 445 struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) 446 { 447 struct vfsmount *child_mnt; 448 spin_lock(&vfsmount_lock); 449 if ((child_mnt = __lookup_mnt(mnt, dentry, 1))) 450 mntget(child_mnt); 451 spin_unlock(&vfsmount_lock); 452 return child_mnt; 453 } 454 455 static inline int check_mnt(struct vfsmount *mnt) 456 { 457 return mnt->mnt_ns == current->nsproxy->mnt_ns; 458 } 459 460 static void touch_mnt_namespace(struct mnt_namespace *ns) 461 { 462 if (ns) { 463 ns->event = ++event; 464 wake_up_interruptible(&ns->poll); 465 } 466 } 467 468 static void __touch_mnt_namespace(struct mnt_namespace *ns) 469 { 470 if (ns && ns->event != event) { 471 ns->event = event; 472 wake_up_interruptible(&ns->poll); 473 } 474 } 475 476 static void detach_mnt(struct vfsmount *mnt, struct path *old_path) 477 { 478 old_path->dentry = mnt->mnt_mountpoint; 479 old_path->mnt = mnt->mnt_parent; 480 mnt->mnt_parent = mnt; 481 mnt->mnt_mountpoint = mnt->mnt_root; 482 list_del_init(&mnt->mnt_child); 483 list_del_init(&mnt->mnt_hash); 484 old_path->dentry->d_mounted--; 485 } 486 487 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, 488 struct vfsmount *child_mnt) 489 { 490 child_mnt->mnt_parent = mntget(mnt); 491 child_mnt->mnt_mountpoint = dget(dentry); 492 dentry->d_mounted++; 493 } 494 495 static void attach_mnt(struct vfsmount *mnt, struct path *path) 496 { 497 mnt_set_mountpoint(path->mnt, path->dentry, mnt); 498 list_add_tail(&mnt->mnt_hash, mount_hashtable + 499 hash(path->mnt, path->dentry)); 500 list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts); 501 } 502 503 /* 504 * the caller must hold vfsmount_lock 505 */ 506 static void commit_tree(struct vfsmount *mnt) 507 { 508 struct vfsmount *parent = mnt->mnt_parent; 509 struct vfsmount *m; 510 LIST_HEAD(head); 511 struct mnt_namespace *n = parent->mnt_ns; 512 513 BUG_ON(parent == mnt); 514 515 list_add_tail(&head, &mnt->mnt_list); 516 list_for_each_entry(m, &head, mnt_list) 517 m->mnt_ns = n; 518 list_splice(&head, n->list.prev); 519 520 list_add_tail(&mnt->mnt_hash, mount_hashtable + 521 hash(parent, mnt->mnt_mountpoint)); 522 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); 523 touch_mnt_namespace(n); 524 } 525 526 static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root) 527 { 528 struct list_head *next = p->mnt_mounts.next; 529 if (next == &p->mnt_mounts) { 530 while (1) { 531 if (p == root) 532 return NULL; 533 next = p->mnt_child.next; 534 if (next != &p->mnt_parent->mnt_mounts) 535 break; 536 p = p->mnt_parent; 537 } 538 } 539 return list_entry(next, struct vfsmount, mnt_child); 540 } 541 542 static struct vfsmount *skip_mnt_tree(struct vfsmount *p) 543 { 544 struct list_head *prev = p->mnt_mounts.prev; 545 while (prev != &p->mnt_mounts) { 546 p = list_entry(prev, struct vfsmount, mnt_child); 547 prev = p->mnt_mounts.prev; 548 } 549 return p; 550 } 551 552 static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root, 553 int flag) 554 { 555 struct super_block *sb = old->mnt_sb; 556 struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname); 557 558 if (mnt) { 559 if (flag & (CL_SLAVE | CL_PRIVATE)) 560 mnt->mnt_group_id = 0; /* not a peer of original */ 561 else 562 mnt->mnt_group_id = old->mnt_group_id; 563 564 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { 565 int err = mnt_alloc_group_id(mnt); 566 if (err) 567 goto out_free; 568 } 569 570 mnt->mnt_flags = old->mnt_flags; 571 atomic_inc(&sb->s_active); 572 mnt->mnt_sb = sb; 573 mnt->mnt_root = dget(root); 574 mnt->mnt_mountpoint = mnt->mnt_root; 575 mnt->mnt_parent = mnt; 576 577 if (flag & CL_SLAVE) { 578 list_add(&mnt->mnt_slave, &old->mnt_slave_list); 579 mnt->mnt_master = old; 580 CLEAR_MNT_SHARED(mnt); 581 } else if (!(flag & CL_PRIVATE)) { 582 if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) 583 list_add(&mnt->mnt_share, &old->mnt_share); 584 if (IS_MNT_SLAVE(old)) 585 list_add(&mnt->mnt_slave, &old->mnt_slave); 586 mnt->mnt_master = old->mnt_master; 587 } 588 if (flag & CL_MAKE_SHARED) 589 set_mnt_shared(mnt); 590 591 /* stick the duplicate mount on the same expiry list 592 * as the original if that was on one */ 593 if (flag & CL_EXPIRE) { 594 if (!list_empty(&old->mnt_expire)) 595 list_add(&mnt->mnt_expire, &old->mnt_expire); 596 } 597 } 598 return mnt; 599 600 out_free: 601 free_vfsmnt(mnt); 602 return NULL; 603 } 604 605 static inline void __mntput(struct vfsmount *mnt) 606 { 607 int cpu; 608 struct super_block *sb = mnt->mnt_sb; 609 /* 610 * We don't have to hold all of the locks at the 611 * same time here because we know that we're the 612 * last reference to mnt and that no new writers 613 * can come in. 614 */ 615 for_each_possible_cpu(cpu) { 616 struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu); 617 spin_lock(&cpu_writer->lock); 618 if (cpu_writer->mnt != mnt) { 619 spin_unlock(&cpu_writer->lock); 620 continue; 621 } 622 atomic_add(cpu_writer->count, &mnt->__mnt_writers); 623 cpu_writer->count = 0; 624 /* 625 * Might as well do this so that no one 626 * ever sees the pointer and expects 627 * it to be valid. 628 */ 629 cpu_writer->mnt = NULL; 630 spin_unlock(&cpu_writer->lock); 631 } 632 /* 633 * This probably indicates that somebody messed 634 * up a mnt_want/drop_write() pair. If this 635 * happens, the filesystem was probably unable 636 * to make r/w->r/o transitions. 637 */ 638 WARN_ON(atomic_read(&mnt->__mnt_writers)); 639 dput(mnt->mnt_root); 640 free_vfsmnt(mnt); 641 deactivate_super(sb); 642 } 643 644 void mntput_no_expire(struct vfsmount *mnt) 645 { 646 repeat: 647 if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) { 648 if (likely(!mnt->mnt_pinned)) { 649 spin_unlock(&vfsmount_lock); 650 __mntput(mnt); 651 return; 652 } 653 atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); 654 mnt->mnt_pinned = 0; 655 spin_unlock(&vfsmount_lock); 656 acct_auto_close_mnt(mnt); 657 security_sb_umount_close(mnt); 658 goto repeat; 659 } 660 } 661 662 EXPORT_SYMBOL(mntput_no_expire); 663 664 void mnt_pin(struct vfsmount *mnt) 665 { 666 spin_lock(&vfsmount_lock); 667 mnt->mnt_pinned++; 668 spin_unlock(&vfsmount_lock); 669 } 670 671 EXPORT_SYMBOL(mnt_pin); 672 673 void mnt_unpin(struct vfsmount *mnt) 674 { 675 spin_lock(&vfsmount_lock); 676 if (mnt->mnt_pinned) { 677 atomic_inc(&mnt->mnt_count); 678 mnt->mnt_pinned--; 679 } 680 spin_unlock(&vfsmount_lock); 681 } 682 683 EXPORT_SYMBOL(mnt_unpin); 684 685 static inline void mangle(struct seq_file *m, const char *s) 686 { 687 seq_escape(m, s, " \t\n\\"); 688 } 689 690 /* 691 * Simple .show_options callback for filesystems which don't want to 692 * implement more complex mount option showing. 693 * 694 * See also save_mount_options(). 695 */ 696 int generic_show_options(struct seq_file *m, struct vfsmount *mnt) 697 { 698 const char *options = mnt->mnt_sb->s_options; 699 700 if (options != NULL && options[0]) { 701 seq_putc(m, ','); 702 mangle(m, options); 703 } 704 705 return 0; 706 } 707 EXPORT_SYMBOL(generic_show_options); 708 709 /* 710 * If filesystem uses generic_show_options(), this function should be 711 * called from the fill_super() callback. 712 * 713 * The .remount_fs callback usually needs to be handled in a special 714 * way, to make sure, that previous options are not overwritten if the 715 * remount fails. 716 * 717 * Also note, that if the filesystem's .remount_fs function doesn't 718 * reset all options to their default value, but changes only newly 719 * given options, then the displayed options will not reflect reality 720 * any more. 721 */ 722 void save_mount_options(struct super_block *sb, char *options) 723 { 724 kfree(sb->s_options); 725 sb->s_options = kstrdup(options, GFP_KERNEL); 726 } 727 EXPORT_SYMBOL(save_mount_options); 728 729 #ifdef CONFIG_PROC_FS 730 /* iterator */ 731 static void *m_start(struct seq_file *m, loff_t *pos) 732 { 733 struct proc_mounts *p = m->private; 734 735 down_read(&namespace_sem); 736 return seq_list_start(&p->ns->list, *pos); 737 } 738 739 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 740 { 741 struct proc_mounts *p = m->private; 742 743 return seq_list_next(v, &p->ns->list, pos); 744 } 745 746 static void m_stop(struct seq_file *m, void *v) 747 { 748 up_read(&namespace_sem); 749 } 750 751 struct proc_fs_info { 752 int flag; 753 const char *str; 754 }; 755 756 static int show_sb_opts(struct seq_file *m, struct super_block *sb) 757 { 758 static const struct proc_fs_info fs_info[] = { 759 { MS_SYNCHRONOUS, ",sync" }, 760 { MS_DIRSYNC, ",dirsync" }, 761 { MS_MANDLOCK, ",mand" }, 762 { 0, NULL } 763 }; 764 const struct proc_fs_info *fs_infop; 765 766 for (fs_infop = fs_info; fs_infop->flag; fs_infop++) { 767 if (sb->s_flags & fs_infop->flag) 768 seq_puts(m, fs_infop->str); 769 } 770 771 return security_sb_show_options(m, sb); 772 } 773 774 static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt) 775 { 776 static const struct proc_fs_info mnt_info[] = { 777 { MNT_NOSUID, ",nosuid" }, 778 { MNT_NODEV, ",nodev" }, 779 { MNT_NOEXEC, ",noexec" }, 780 { MNT_NOATIME, ",noatime" }, 781 { MNT_NODIRATIME, ",nodiratime" }, 782 { MNT_RELATIME, ",relatime" }, 783 { MNT_STRICTATIME, ",strictatime" }, 784 { 0, NULL } 785 }; 786 const struct proc_fs_info *fs_infop; 787 788 for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) { 789 if (mnt->mnt_flags & fs_infop->flag) 790 seq_puts(m, fs_infop->str); 791 } 792 } 793 794 static void show_type(struct seq_file *m, struct super_block *sb) 795 { 796 mangle(m, sb->s_type->name); 797 if (sb->s_subtype && sb->s_subtype[0]) { 798 seq_putc(m, '.'); 799 mangle(m, sb->s_subtype); 800 } 801 } 802 803 static int show_vfsmnt(struct seq_file *m, void *v) 804 { 805 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 806 int err = 0; 807 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 808 809 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 810 seq_putc(m, ' '); 811 seq_path(m, &mnt_path, " \t\n\\"); 812 seq_putc(m, ' '); 813 show_type(m, mnt->mnt_sb); 814 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw"); 815 err = show_sb_opts(m, mnt->mnt_sb); 816 if (err) 817 goto out; 818 show_mnt_opts(m, mnt); 819 if (mnt->mnt_sb->s_op->show_options) 820 err = mnt->mnt_sb->s_op->show_options(m, mnt); 821 seq_puts(m, " 0 0\n"); 822 out: 823 return err; 824 } 825 826 const struct seq_operations mounts_op = { 827 .start = m_start, 828 .next = m_next, 829 .stop = m_stop, 830 .show = show_vfsmnt 831 }; 832 833 static int show_mountinfo(struct seq_file *m, void *v) 834 { 835 struct proc_mounts *p = m->private; 836 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 837 struct super_block *sb = mnt->mnt_sb; 838 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 839 struct path root = p->root; 840 int err = 0; 841 842 seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id, 843 MAJOR(sb->s_dev), MINOR(sb->s_dev)); 844 seq_dentry(m, mnt->mnt_root, " \t\n\\"); 845 seq_putc(m, ' '); 846 seq_path_root(m, &mnt_path, &root, " \t\n\\"); 847 if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) { 848 /* 849 * Mountpoint is outside root, discard that one. Ugly, 850 * but less so than trying to do that in iterator in a 851 * race-free way (due to renames). 852 */ 853 return SEQ_SKIP; 854 } 855 seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); 856 show_mnt_opts(m, mnt); 857 858 /* Tagged fields ("foo:X" or "bar") */ 859 if (IS_MNT_SHARED(mnt)) 860 seq_printf(m, " shared:%i", mnt->mnt_group_id); 861 if (IS_MNT_SLAVE(mnt)) { 862 int master = mnt->mnt_master->mnt_group_id; 863 int dom = get_dominating_id(mnt, &p->root); 864 seq_printf(m, " master:%i", master); 865 if (dom && dom != master) 866 seq_printf(m, " propagate_from:%i", dom); 867 } 868 if (IS_MNT_UNBINDABLE(mnt)) 869 seq_puts(m, " unbindable"); 870 871 /* Filesystem specific data */ 872 seq_puts(m, " - "); 873 show_type(m, sb); 874 seq_putc(m, ' '); 875 mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); 876 seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw"); 877 err = show_sb_opts(m, sb); 878 if (err) 879 goto out; 880 if (sb->s_op->show_options) 881 err = sb->s_op->show_options(m, mnt); 882 seq_putc(m, '\n'); 883 out: 884 return err; 885 } 886 887 const struct seq_operations mountinfo_op = { 888 .start = m_start, 889 .next = m_next, 890 .stop = m_stop, 891 .show = show_mountinfo, 892 }; 893 894 static int show_vfsstat(struct seq_file *m, void *v) 895 { 896 struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); 897 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 898 int err = 0; 899 900 /* device */ 901 if (mnt->mnt_devname) { 902 seq_puts(m, "device "); 903 mangle(m, mnt->mnt_devname); 904 } else 905 seq_puts(m, "no device"); 906 907 /* mount point */ 908 seq_puts(m, " mounted on "); 909 seq_path(m, &mnt_path, " \t\n\\"); 910 seq_putc(m, ' '); 911 912 /* file system type */ 913 seq_puts(m, "with fstype "); 914 show_type(m, mnt->mnt_sb); 915 916 /* optional statistics */ 917 if (mnt->mnt_sb->s_op->show_stats) { 918 seq_putc(m, ' '); 919 err = mnt->mnt_sb->s_op->show_stats(m, mnt); 920 } 921 922 seq_putc(m, '\n'); 923 return err; 924 } 925 926 const struct seq_operations mountstats_op = { 927 .start = m_start, 928 .next = m_next, 929 .stop = m_stop, 930 .show = show_vfsstat, 931 }; 932 #endif /* CONFIG_PROC_FS */ 933 934 /** 935 * may_umount_tree - check if a mount tree is busy 936 * @mnt: root of mount tree 937 * 938 * This is called to check if a tree of mounts has any 939 * open files, pwds, chroots or sub mounts that are 940 * busy. 941 */ 942 int may_umount_tree(struct vfsmount *mnt) 943 { 944 int actual_refs = 0; 945 int minimum_refs = 0; 946 struct vfsmount *p; 947 948 spin_lock(&vfsmount_lock); 949 for (p = mnt; p; p = next_mnt(p, mnt)) { 950 actual_refs += atomic_read(&p->mnt_count); 951 minimum_refs += 2; 952 } 953 spin_unlock(&vfsmount_lock); 954 955 if (actual_refs > minimum_refs) 956 return 0; 957 958 return 1; 959 } 960 961 EXPORT_SYMBOL(may_umount_tree); 962 963 /** 964 * may_umount - check if a mount point is busy 965 * @mnt: root of mount 966 * 967 * This is called to check if a mount point has any 968 * open files, pwds, chroots or sub mounts. If the 969 * mount has sub mounts this will return busy 970 * regardless of whether the sub mounts are busy. 971 * 972 * Doesn't take quota and stuff into account. IOW, in some cases it will 973 * give false negatives. The main reason why it's here is that we need 974 * a non-destructive way to look for easily umountable filesystems. 975 */ 976 int may_umount(struct vfsmount *mnt) 977 { 978 int ret = 1; 979 spin_lock(&vfsmount_lock); 980 if (propagate_mount_busy(mnt, 2)) 981 ret = 0; 982 spin_unlock(&vfsmount_lock); 983 return ret; 984 } 985 986 EXPORT_SYMBOL(may_umount); 987 988 void release_mounts(struct list_head *head) 989 { 990 struct vfsmount *mnt; 991 while (!list_empty(head)) { 992 mnt = list_first_entry(head, struct vfsmount, mnt_hash); 993 list_del_init(&mnt->mnt_hash); 994 if (mnt->mnt_parent != mnt) { 995 struct dentry *dentry; 996 struct vfsmount *m; 997 spin_lock(&vfsmount_lock); 998 dentry = mnt->mnt_mountpoint; 999 m = mnt->mnt_parent; 1000 mnt->mnt_mountpoint = mnt->mnt_root; 1001 mnt->mnt_parent = mnt; 1002 m->mnt_ghosts--; 1003 spin_unlock(&vfsmount_lock); 1004 dput(dentry); 1005 mntput(m); 1006 } 1007 mntput(mnt); 1008 } 1009 } 1010 1011 void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) 1012 { 1013 struct vfsmount *p; 1014 1015 for (p = mnt; p; p = next_mnt(p, mnt)) 1016 list_move(&p->mnt_hash, kill); 1017 1018 if (propagate) 1019 propagate_umount(kill); 1020 1021 list_for_each_entry(p, kill, mnt_hash) { 1022 list_del_init(&p->mnt_expire); 1023 list_del_init(&p->mnt_list); 1024 __touch_mnt_namespace(p->mnt_ns); 1025 p->mnt_ns = NULL; 1026 list_del_init(&p->mnt_child); 1027 if (p->mnt_parent != p) { 1028 p->mnt_parent->mnt_ghosts++; 1029 p->mnt_mountpoint->d_mounted--; 1030 } 1031 change_mnt_propagation(p, MS_PRIVATE); 1032 } 1033 } 1034 1035 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts); 1036 1037 static int do_umount(struct vfsmount *mnt, int flags) 1038 { 1039 struct super_block *sb = mnt->mnt_sb; 1040 int retval; 1041 LIST_HEAD(umount_list); 1042 1043 retval = security_sb_umount(mnt, flags); 1044 if (retval) 1045 return retval; 1046 1047 /* 1048 * Allow userspace to request a mountpoint be expired rather than 1049 * unmounting unconditionally. Unmount only happens if: 1050 * (1) the mark is already set (the mark is cleared by mntput()) 1051 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] 1052 */ 1053 if (flags & MNT_EXPIRE) { 1054 if (mnt == current->fs->root.mnt || 1055 flags & (MNT_FORCE | MNT_DETACH)) 1056 return -EINVAL; 1057 1058 if (atomic_read(&mnt->mnt_count) != 2) 1059 return -EBUSY; 1060 1061 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1062 return -EAGAIN; 1063 } 1064 1065 /* 1066 * If we may have to abort operations to get out of this 1067 * mount, and they will themselves hold resources we must 1068 * allow the fs to do things. In the Unix tradition of 1069 * 'Gee thats tricky lets do it in userspace' the umount_begin 1070 * might fail to complete on the first run through as other tasks 1071 * must return, and the like. Thats for the mount program to worry 1072 * about for the moment. 1073 */ 1074 1075 if (flags & MNT_FORCE && sb->s_op->umount_begin) { 1076 lock_kernel(); 1077 sb->s_op->umount_begin(sb); 1078 unlock_kernel(); 1079 } 1080 1081 /* 1082 * No sense to grab the lock for this test, but test itself looks 1083 * somewhat bogus. Suggestions for better replacement? 1084 * Ho-hum... In principle, we might treat that as umount + switch 1085 * to rootfs. GC would eventually take care of the old vfsmount. 1086 * Actually it makes sense, especially if rootfs would contain a 1087 * /reboot - static binary that would close all descriptors and 1088 * call reboot(9). Then init(8) could umount root and exec /reboot. 1089 */ 1090 if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { 1091 /* 1092 * Special case for "unmounting" root ... 1093 * we just try to remount it readonly. 1094 */ 1095 down_write(&sb->s_umount); 1096 if (!(sb->s_flags & MS_RDONLY)) { 1097 lock_kernel(); 1098 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); 1099 unlock_kernel(); 1100 } 1101 up_write(&sb->s_umount); 1102 return retval; 1103 } 1104 1105 down_write(&namespace_sem); 1106 spin_lock(&vfsmount_lock); 1107 event++; 1108 1109 if (!(flags & MNT_DETACH)) 1110 shrink_submounts(mnt, &umount_list); 1111 1112 retval = -EBUSY; 1113 if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { 1114 if (!list_empty(&mnt->mnt_list)) 1115 umount_tree(mnt, 1, &umount_list); 1116 retval = 0; 1117 } 1118 spin_unlock(&vfsmount_lock); 1119 if (retval) 1120 security_sb_umount_busy(mnt); 1121 up_write(&namespace_sem); 1122 release_mounts(&umount_list); 1123 return retval; 1124 } 1125 1126 /* 1127 * Now umount can handle mount points as well as block devices. 1128 * This is important for filesystems which use unnamed block devices. 1129 * 1130 * We now support a flag for forced unmount like the other 'big iron' 1131 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD 1132 */ 1133 1134 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 1135 { 1136 struct path path; 1137 int retval; 1138 1139 retval = user_path(name, &path); 1140 if (retval) 1141 goto out; 1142 retval = -EINVAL; 1143 if (path.dentry != path.mnt->mnt_root) 1144 goto dput_and_out; 1145 if (!check_mnt(path.mnt)) 1146 goto dput_and_out; 1147 1148 retval = -EPERM; 1149 if (!capable(CAP_SYS_ADMIN)) 1150 goto dput_and_out; 1151 1152 retval = do_umount(path.mnt, flags); 1153 dput_and_out: 1154 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 1155 dput(path.dentry); 1156 mntput_no_expire(path.mnt); 1157 out: 1158 return retval; 1159 } 1160 1161 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 1162 1163 /* 1164 * The 2.0 compatible umount. No flags. 1165 */ 1166 SYSCALL_DEFINE1(oldumount, char __user *, name) 1167 { 1168 return sys_umount(name, 0); 1169 } 1170 1171 #endif 1172 1173 static int mount_is_safe(struct path *path) 1174 { 1175 if (capable(CAP_SYS_ADMIN)) 1176 return 0; 1177 return -EPERM; 1178 #ifdef notyet 1179 if (S_ISLNK(path->dentry->d_inode->i_mode)) 1180 return -EPERM; 1181 if (path->dentry->d_inode->i_mode & S_ISVTX) { 1182 if (current_uid() != path->dentry->d_inode->i_uid) 1183 return -EPERM; 1184 } 1185 if (inode_permission(path->dentry->d_inode, MAY_WRITE)) 1186 return -EPERM; 1187 return 0; 1188 #endif 1189 } 1190 1191 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, 1192 int flag) 1193 { 1194 struct vfsmount *res, *p, *q, *r, *s; 1195 struct path path; 1196 1197 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) 1198 return NULL; 1199 1200 res = q = clone_mnt(mnt, dentry, flag); 1201 if (!q) 1202 goto Enomem; 1203 q->mnt_mountpoint = mnt->mnt_mountpoint; 1204 1205 p = mnt; 1206 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { 1207 if (!is_subdir(r->mnt_mountpoint, dentry)) 1208 continue; 1209 1210 for (s = r; s; s = next_mnt(s, r)) { 1211 if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { 1212 s = skip_mnt_tree(s); 1213 continue; 1214 } 1215 while (p != s->mnt_parent) { 1216 p = p->mnt_parent; 1217 q = q->mnt_parent; 1218 } 1219 p = s; 1220 path.mnt = q; 1221 path.dentry = p->mnt_mountpoint; 1222 q = clone_mnt(p, p->mnt_root, flag); 1223 if (!q) 1224 goto Enomem; 1225 spin_lock(&vfsmount_lock); 1226 list_add_tail(&q->mnt_list, &res->mnt_list); 1227 attach_mnt(q, &path); 1228 spin_unlock(&vfsmount_lock); 1229 } 1230 } 1231 return res; 1232 Enomem: 1233 if (res) { 1234 LIST_HEAD(umount_list); 1235 spin_lock(&vfsmount_lock); 1236 umount_tree(res, 0, &umount_list); 1237 spin_unlock(&vfsmount_lock); 1238 release_mounts(&umount_list); 1239 } 1240 return NULL; 1241 } 1242 1243 struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry) 1244 { 1245 struct vfsmount *tree; 1246 down_write(&namespace_sem); 1247 tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE); 1248 up_write(&namespace_sem); 1249 return tree; 1250 } 1251 1252 void drop_collected_mounts(struct vfsmount *mnt) 1253 { 1254 LIST_HEAD(umount_list); 1255 down_write(&namespace_sem); 1256 spin_lock(&vfsmount_lock); 1257 umount_tree(mnt, 0, &umount_list); 1258 spin_unlock(&vfsmount_lock); 1259 up_write(&namespace_sem); 1260 release_mounts(&umount_list); 1261 } 1262 1263 static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end) 1264 { 1265 struct vfsmount *p; 1266 1267 for (p = mnt; p != end; p = next_mnt(p, mnt)) { 1268 if (p->mnt_group_id && !IS_MNT_SHARED(p)) 1269 mnt_release_group_id(p); 1270 } 1271 } 1272 1273 static int invent_group_ids(struct vfsmount *mnt, bool recurse) 1274 { 1275 struct vfsmount *p; 1276 1277 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { 1278 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { 1279 int err = mnt_alloc_group_id(p); 1280 if (err) { 1281 cleanup_group_ids(mnt, p); 1282 return err; 1283 } 1284 } 1285 } 1286 1287 return 0; 1288 } 1289 1290 /* 1291 * @source_mnt : mount tree to be attached 1292 * @nd : place the mount tree @source_mnt is attached 1293 * @parent_nd : if non-null, detach the source_mnt from its parent and 1294 * store the parent mount and mountpoint dentry. 1295 * (done when source_mnt is moved) 1296 * 1297 * NOTE: in the table below explains the semantics when a source mount 1298 * of a given type is attached to a destination mount of a given type. 1299 * --------------------------------------------------------------------------- 1300 * | BIND MOUNT OPERATION | 1301 * |************************************************************************** 1302 * | source-->| shared | private | slave | unbindable | 1303 * | dest | | | | | 1304 * | | | | | | | 1305 * | v | | | | | 1306 * |************************************************************************** 1307 * | shared | shared (++) | shared (+) | shared(+++)| invalid | 1308 * | | | | | | 1309 * |non-shared| shared (+) | private | slave (*) | invalid | 1310 * *************************************************************************** 1311 * A bind operation clones the source mount and mounts the clone on the 1312 * destination mount. 1313 * 1314 * (++) the cloned mount is propagated to all the mounts in the propagation 1315 * tree of the destination mount and the cloned mount is added to 1316 * the peer group of the source mount. 1317 * (+) the cloned mount is created under the destination mount and is marked 1318 * as shared. The cloned mount is added to the peer group of the source 1319 * mount. 1320 * (+++) the mount is propagated to all the mounts in the propagation tree 1321 * of the destination mount and the cloned mount is made slave 1322 * of the same master as that of the source mount. The cloned mount 1323 * is marked as 'shared and slave'. 1324 * (*) the cloned mount is made a slave of the same master as that of the 1325 * source mount. 1326 * 1327 * --------------------------------------------------------------------------- 1328 * | MOVE MOUNT OPERATION | 1329 * |************************************************************************** 1330 * | source-->| shared | private | slave | unbindable | 1331 * | dest | | | | | 1332 * | | | | | | | 1333 * | v | | | | | 1334 * |************************************************************************** 1335 * | shared | shared (+) | shared (+) | shared(+++) | invalid | 1336 * | | | | | | 1337 * |non-shared| shared (+*) | private | slave (*) | unbindable | 1338 * *************************************************************************** 1339 * 1340 * (+) the mount is moved to the destination. And is then propagated to 1341 * all the mounts in the propagation tree of the destination mount. 1342 * (+*) the mount is moved to the destination. 1343 * (+++) the mount is moved to the destination and is then propagated to 1344 * all the mounts belonging to the destination mount's propagation tree. 1345 * the mount is marked as 'shared and slave'. 1346 * (*) the mount continues to be a slave at the new location. 1347 * 1348 * if the source mount is a tree, the operations explained above is 1349 * applied to each mount in the tree. 1350 * Must be called without spinlocks held, since this function can sleep 1351 * in allocations. 1352 */ 1353 static int attach_recursive_mnt(struct vfsmount *source_mnt, 1354 struct path *path, struct path *parent_path) 1355 { 1356 LIST_HEAD(tree_list); 1357 struct vfsmount *dest_mnt = path->mnt; 1358 struct dentry *dest_dentry = path->dentry; 1359 struct vfsmount *child, *p; 1360 int err; 1361 1362 if (IS_MNT_SHARED(dest_mnt)) { 1363 err = invent_group_ids(source_mnt, true); 1364 if (err) 1365 goto out; 1366 } 1367 err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list); 1368 if (err) 1369 goto out_cleanup_ids; 1370 1371 if (IS_MNT_SHARED(dest_mnt)) { 1372 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1373 set_mnt_shared(p); 1374 } 1375 1376 spin_lock(&vfsmount_lock); 1377 if (parent_path) { 1378 detach_mnt(source_mnt, parent_path); 1379 attach_mnt(source_mnt, path); 1380 touch_mnt_namespace(current->nsproxy->mnt_ns); 1381 } else { 1382 mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt); 1383 commit_tree(source_mnt); 1384 } 1385 1386 list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { 1387 list_del_init(&child->mnt_hash); 1388 commit_tree(child); 1389 } 1390 spin_unlock(&vfsmount_lock); 1391 return 0; 1392 1393 out_cleanup_ids: 1394 if (IS_MNT_SHARED(dest_mnt)) 1395 cleanup_group_ids(source_mnt, NULL); 1396 out: 1397 return err; 1398 } 1399 1400 static int graft_tree(struct vfsmount *mnt, struct path *path) 1401 { 1402 int err; 1403 if (mnt->mnt_sb->s_flags & MS_NOUSER) 1404 return -EINVAL; 1405 1406 if (S_ISDIR(path->dentry->d_inode->i_mode) != 1407 S_ISDIR(mnt->mnt_root->d_inode->i_mode)) 1408 return -ENOTDIR; 1409 1410 err = -ENOENT; 1411 mutex_lock(&path->dentry->d_inode->i_mutex); 1412 if (IS_DEADDIR(path->dentry->d_inode)) 1413 goto out_unlock; 1414 1415 err = security_sb_check_sb(mnt, path); 1416 if (err) 1417 goto out_unlock; 1418 1419 err = -ENOENT; 1420 if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry)) 1421 err = attach_recursive_mnt(mnt, path, NULL); 1422 out_unlock: 1423 mutex_unlock(&path->dentry->d_inode->i_mutex); 1424 if (!err) 1425 security_sb_post_addmount(mnt, path); 1426 return err; 1427 } 1428 1429 /* 1430 * recursively change the type of the mountpoint. 1431 */ 1432 static int do_change_type(struct path *path, int flag) 1433 { 1434 struct vfsmount *m, *mnt = path->mnt; 1435 int recurse = flag & MS_REC; 1436 int type = flag & ~MS_REC; 1437 int err = 0; 1438 1439 if (!capable(CAP_SYS_ADMIN)) 1440 return -EPERM; 1441 1442 if (path->dentry != path->mnt->mnt_root) 1443 return -EINVAL; 1444 1445 down_write(&namespace_sem); 1446 if (type == MS_SHARED) { 1447 err = invent_group_ids(mnt, recurse); 1448 if (err) 1449 goto out_unlock; 1450 } 1451 1452 spin_lock(&vfsmount_lock); 1453 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 1454 change_mnt_propagation(m, type); 1455 spin_unlock(&vfsmount_lock); 1456 1457 out_unlock: 1458 up_write(&namespace_sem); 1459 return err; 1460 } 1461 1462 /* 1463 * do loopback mount. 1464 */ 1465 static int do_loopback(struct path *path, char *old_name, 1466 int recurse) 1467 { 1468 struct path old_path; 1469 struct vfsmount *mnt = NULL; 1470 int err = mount_is_safe(path); 1471 if (err) 1472 return err; 1473 if (!old_name || !*old_name) 1474 return -EINVAL; 1475 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 1476 if (err) 1477 return err; 1478 1479 down_write(&namespace_sem); 1480 err = -EINVAL; 1481 if (IS_MNT_UNBINDABLE(old_path.mnt)) 1482 goto out; 1483 1484 if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt)) 1485 goto out; 1486 1487 err = -ENOMEM; 1488 if (recurse) 1489 mnt = copy_tree(old_path.mnt, old_path.dentry, 0); 1490 else 1491 mnt = clone_mnt(old_path.mnt, old_path.dentry, 0); 1492 1493 if (!mnt) 1494 goto out; 1495 1496 err = graft_tree(mnt, path); 1497 if (err) { 1498 LIST_HEAD(umount_list); 1499 spin_lock(&vfsmount_lock); 1500 umount_tree(mnt, 0, &umount_list); 1501 spin_unlock(&vfsmount_lock); 1502 release_mounts(&umount_list); 1503 } 1504 1505 out: 1506 up_write(&namespace_sem); 1507 path_put(&old_path); 1508 return err; 1509 } 1510 1511 static int change_mount_flags(struct vfsmount *mnt, int ms_flags) 1512 { 1513 int error = 0; 1514 int readonly_request = 0; 1515 1516 if (ms_flags & MS_RDONLY) 1517 readonly_request = 1; 1518 if (readonly_request == __mnt_is_readonly(mnt)) 1519 return 0; 1520 1521 if (readonly_request) 1522 error = mnt_make_readonly(mnt); 1523 else 1524 __mnt_unmake_readonly(mnt); 1525 return error; 1526 } 1527 1528 /* 1529 * change filesystem flags. dir should be a physical root of filesystem. 1530 * If you've mounted a non-root directory somewhere and want to do remount 1531 * on it - tough luck. 1532 */ 1533 static int do_remount(struct path *path, int flags, int mnt_flags, 1534 void *data) 1535 { 1536 int err; 1537 struct super_block *sb = path->mnt->mnt_sb; 1538 1539 if (!capable(CAP_SYS_ADMIN)) 1540 return -EPERM; 1541 1542 if (!check_mnt(path->mnt)) 1543 return -EINVAL; 1544 1545 if (path->dentry != path->mnt->mnt_root) 1546 return -EINVAL; 1547 1548 down_write(&sb->s_umount); 1549 if (flags & MS_BIND) 1550 err = change_mount_flags(path->mnt, flags); 1551 else 1552 err = do_remount_sb(sb, flags, data, 0); 1553 if (!err) 1554 path->mnt->mnt_flags = mnt_flags; 1555 up_write(&sb->s_umount); 1556 if (!err) { 1557 security_sb_post_remount(path->mnt, flags, data); 1558 1559 spin_lock(&vfsmount_lock); 1560 touch_mnt_namespace(path->mnt->mnt_ns); 1561 spin_unlock(&vfsmount_lock); 1562 } 1563 return err; 1564 } 1565 1566 static inline int tree_contains_unbindable(struct vfsmount *mnt) 1567 { 1568 struct vfsmount *p; 1569 for (p = mnt; p; p = next_mnt(p, mnt)) { 1570 if (IS_MNT_UNBINDABLE(p)) 1571 return 1; 1572 } 1573 return 0; 1574 } 1575 1576 static int do_move_mount(struct path *path, char *old_name) 1577 { 1578 struct path old_path, parent_path; 1579 struct vfsmount *p; 1580 int err = 0; 1581 if (!capable(CAP_SYS_ADMIN)) 1582 return -EPERM; 1583 if (!old_name || !*old_name) 1584 return -EINVAL; 1585 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 1586 if (err) 1587 return err; 1588 1589 down_write(&namespace_sem); 1590 while (d_mountpoint(path->dentry) && 1591 follow_down(&path->mnt, &path->dentry)) 1592 ; 1593 err = -EINVAL; 1594 if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt)) 1595 goto out; 1596 1597 err = -ENOENT; 1598 mutex_lock(&path->dentry->d_inode->i_mutex); 1599 if (IS_DEADDIR(path->dentry->d_inode)) 1600 goto out1; 1601 1602 if (!IS_ROOT(path->dentry) && d_unhashed(path->dentry)) 1603 goto out1; 1604 1605 err = -EINVAL; 1606 if (old_path.dentry != old_path.mnt->mnt_root) 1607 goto out1; 1608 1609 if (old_path.mnt == old_path.mnt->mnt_parent) 1610 goto out1; 1611 1612 if (S_ISDIR(path->dentry->d_inode->i_mode) != 1613 S_ISDIR(old_path.dentry->d_inode->i_mode)) 1614 goto out1; 1615 /* 1616 * Don't move a mount residing in a shared parent. 1617 */ 1618 if (old_path.mnt->mnt_parent && 1619 IS_MNT_SHARED(old_path.mnt->mnt_parent)) 1620 goto out1; 1621 /* 1622 * Don't move a mount tree containing unbindable mounts to a destination 1623 * mount which is shared. 1624 */ 1625 if (IS_MNT_SHARED(path->mnt) && 1626 tree_contains_unbindable(old_path.mnt)) 1627 goto out1; 1628 err = -ELOOP; 1629 for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent) 1630 if (p == old_path.mnt) 1631 goto out1; 1632 1633 err = attach_recursive_mnt(old_path.mnt, path, &parent_path); 1634 if (err) 1635 goto out1; 1636 1637 /* if the mount is moved, it should no longer be expire 1638 * automatically */ 1639 list_del_init(&old_path.mnt->mnt_expire); 1640 out1: 1641 mutex_unlock(&path->dentry->d_inode->i_mutex); 1642 out: 1643 up_write(&namespace_sem); 1644 if (!err) 1645 path_put(&parent_path); 1646 path_put(&old_path); 1647 return err; 1648 } 1649 1650 /* 1651 * create a new mount for userspace and request it to be added into the 1652 * namespace's tree 1653 */ 1654 static int do_new_mount(struct path *path, char *type, int flags, 1655 int mnt_flags, char *name, void *data) 1656 { 1657 struct vfsmount *mnt; 1658 1659 if (!type || !memchr(type, 0, PAGE_SIZE)) 1660 return -EINVAL; 1661 1662 /* we need capabilities... */ 1663 if (!capable(CAP_SYS_ADMIN)) 1664 return -EPERM; 1665 1666 mnt = do_kern_mount(type, flags, name, data); 1667 if (IS_ERR(mnt)) 1668 return PTR_ERR(mnt); 1669 1670 return do_add_mount(mnt, path, mnt_flags, NULL); 1671 } 1672 1673 /* 1674 * add a mount into a namespace's mount tree 1675 * - provide the option of adding the new mount to an expiration list 1676 */ 1677 int do_add_mount(struct vfsmount *newmnt, struct path *path, 1678 int mnt_flags, struct list_head *fslist) 1679 { 1680 int err; 1681 1682 down_write(&namespace_sem); 1683 /* Something was mounted here while we slept */ 1684 while (d_mountpoint(path->dentry) && 1685 follow_down(&path->mnt, &path->dentry)) 1686 ; 1687 err = -EINVAL; 1688 if (!check_mnt(path->mnt)) 1689 goto unlock; 1690 1691 /* Refuse the same filesystem on the same mount point */ 1692 err = -EBUSY; 1693 if (path->mnt->mnt_sb == newmnt->mnt_sb && 1694 path->mnt->mnt_root == path->dentry) 1695 goto unlock; 1696 1697 err = -EINVAL; 1698 if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode)) 1699 goto unlock; 1700 1701 newmnt->mnt_flags = mnt_flags; 1702 if ((err = graft_tree(newmnt, path))) 1703 goto unlock; 1704 1705 if (fslist) /* add to the specified expiration list */ 1706 list_add_tail(&newmnt->mnt_expire, fslist); 1707 1708 up_write(&namespace_sem); 1709 return 0; 1710 1711 unlock: 1712 up_write(&namespace_sem); 1713 mntput(newmnt); 1714 return err; 1715 } 1716 1717 EXPORT_SYMBOL_GPL(do_add_mount); 1718 1719 /* 1720 * process a list of expirable mountpoints with the intent of discarding any 1721 * mountpoints that aren't in use and haven't been touched since last we came 1722 * here 1723 */ 1724 void mark_mounts_for_expiry(struct list_head *mounts) 1725 { 1726 struct vfsmount *mnt, *next; 1727 LIST_HEAD(graveyard); 1728 LIST_HEAD(umounts); 1729 1730 if (list_empty(mounts)) 1731 return; 1732 1733 down_write(&namespace_sem); 1734 spin_lock(&vfsmount_lock); 1735 1736 /* extract from the expiration list every vfsmount that matches the 1737 * following criteria: 1738 * - only referenced by its parent vfsmount 1739 * - still marked for expiry (marked on the last call here; marks are 1740 * cleared by mntput()) 1741 */ 1742 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { 1743 if (!xchg(&mnt->mnt_expiry_mark, 1) || 1744 propagate_mount_busy(mnt, 1)) 1745 continue; 1746 list_move(&mnt->mnt_expire, &graveyard); 1747 } 1748 while (!list_empty(&graveyard)) { 1749 mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire); 1750 touch_mnt_namespace(mnt->mnt_ns); 1751 umount_tree(mnt, 1, &umounts); 1752 } 1753 spin_unlock(&vfsmount_lock); 1754 up_write(&namespace_sem); 1755 1756 release_mounts(&umounts); 1757 } 1758 1759 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); 1760 1761 /* 1762 * Ripoff of 'select_parent()' 1763 * 1764 * search the list of submounts for a given mountpoint, and move any 1765 * shrinkable submounts to the 'graveyard' list. 1766 */ 1767 static int select_submounts(struct vfsmount *parent, struct list_head *graveyard) 1768 { 1769 struct vfsmount *this_parent = parent; 1770 struct list_head *next; 1771 int found = 0; 1772 1773 repeat: 1774 next = this_parent->mnt_mounts.next; 1775 resume: 1776 while (next != &this_parent->mnt_mounts) { 1777 struct list_head *tmp = next; 1778 struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child); 1779 1780 next = tmp->next; 1781 if (!(mnt->mnt_flags & MNT_SHRINKABLE)) 1782 continue; 1783 /* 1784 * Descend a level if the d_mounts list is non-empty. 1785 */ 1786 if (!list_empty(&mnt->mnt_mounts)) { 1787 this_parent = mnt; 1788 goto repeat; 1789 } 1790 1791 if (!propagate_mount_busy(mnt, 1)) { 1792 list_move_tail(&mnt->mnt_expire, graveyard); 1793 found++; 1794 } 1795 } 1796 /* 1797 * All done at this level ... ascend and resume the search 1798 */ 1799 if (this_parent != parent) { 1800 next = this_parent->mnt_child.next; 1801 this_parent = this_parent->mnt_parent; 1802 goto resume; 1803 } 1804 return found; 1805 } 1806 1807 /* 1808 * process a list of expirable mountpoints with the intent of discarding any 1809 * submounts of a specific parent mountpoint 1810 */ 1811 static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts) 1812 { 1813 LIST_HEAD(graveyard); 1814 struct vfsmount *m; 1815 1816 /* extract submounts of 'mountpoint' from the expiration list */ 1817 while (select_submounts(mnt, &graveyard)) { 1818 while (!list_empty(&graveyard)) { 1819 m = list_first_entry(&graveyard, struct vfsmount, 1820 mnt_expire); 1821 touch_mnt_namespace(m->mnt_ns); 1822 umount_tree(m, 1, umounts); 1823 } 1824 } 1825 } 1826 1827 /* 1828 * Some copy_from_user() implementations do not return the exact number of 1829 * bytes remaining to copy on a fault. But copy_mount_options() requires that. 1830 * Note that this function differs from copy_from_user() in that it will oops 1831 * on bad values of `to', rather than returning a short copy. 1832 */ 1833 static long exact_copy_from_user(void *to, const void __user * from, 1834 unsigned long n) 1835 { 1836 char *t = to; 1837 const char __user *f = from; 1838 char c; 1839 1840 if (!access_ok(VERIFY_READ, from, n)) 1841 return n; 1842 1843 while (n) { 1844 if (__get_user(c, f)) { 1845 memset(t, 0, n); 1846 break; 1847 } 1848 *t++ = c; 1849 f++; 1850 n--; 1851 } 1852 return n; 1853 } 1854 1855 int copy_mount_options(const void __user * data, unsigned long *where) 1856 { 1857 int i; 1858 unsigned long page; 1859 unsigned long size; 1860 1861 *where = 0; 1862 if (!data) 1863 return 0; 1864 1865 if (!(page = __get_free_page(GFP_KERNEL))) 1866 return -ENOMEM; 1867 1868 /* We only care that *some* data at the address the user 1869 * gave us is valid. Just in case, we'll zero 1870 * the remainder of the page. 1871 */ 1872 /* copy_from_user cannot cross TASK_SIZE ! */ 1873 size = TASK_SIZE - (unsigned long)data; 1874 if (size > PAGE_SIZE) 1875 size = PAGE_SIZE; 1876 1877 i = size - exact_copy_from_user((void *)page, data, size); 1878 if (!i) { 1879 free_page(page); 1880 return -EFAULT; 1881 } 1882 if (i != PAGE_SIZE) 1883 memset((char *)page + i, 0, PAGE_SIZE - i); 1884 *where = page; 1885 return 0; 1886 } 1887 1888 /* 1889 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 1890 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 1891 * 1892 * data is a (void *) that can point to any structure up to 1893 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 1894 * information (or be NULL). 1895 * 1896 * Pre-0.97 versions of mount() didn't have a flags word. 1897 * When the flags word was introduced its top half was required 1898 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 1899 * Therefore, if this magic number is present, it carries no information 1900 * and must be discarded. 1901 */ 1902 long do_mount(char *dev_name, char *dir_name, char *type_page, 1903 unsigned long flags, void *data_page) 1904 { 1905 struct path path; 1906 int retval = 0; 1907 int mnt_flags = 0; 1908 1909 /* Discard magic */ 1910 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 1911 flags &= ~MS_MGC_MSK; 1912 1913 /* Basic sanity checks */ 1914 1915 if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE)) 1916 return -EINVAL; 1917 if (dev_name && !memchr(dev_name, 0, PAGE_SIZE)) 1918 return -EINVAL; 1919 1920 if (data_page) 1921 ((char *)data_page)[PAGE_SIZE - 1] = 0; 1922 1923 /* Default to relatime */ 1924 mnt_flags |= MNT_RELATIME; 1925 1926 /* Separate the per-mountpoint flags */ 1927 if (flags & MS_NOSUID) 1928 mnt_flags |= MNT_NOSUID; 1929 if (flags & MS_NODEV) 1930 mnt_flags |= MNT_NODEV; 1931 if (flags & MS_NOEXEC) 1932 mnt_flags |= MNT_NOEXEC; 1933 if (flags & MS_NOATIME) 1934 mnt_flags |= MNT_NOATIME; 1935 if (flags & MS_NODIRATIME) 1936 mnt_flags |= MNT_NODIRATIME; 1937 if (flags & MS_STRICTATIME) 1938 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 1939 if (flags & MS_RDONLY) 1940 mnt_flags |= MNT_READONLY; 1941 1942 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | 1943 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | 1944 MS_STRICTATIME); 1945 1946 /* ... and get the mountpoint */ 1947 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path); 1948 if (retval) 1949 return retval; 1950 1951 retval = security_sb_mount(dev_name, &path, 1952 type_page, flags, data_page); 1953 if (retval) 1954 goto dput_out; 1955 1956 if (flags & MS_REMOUNT) 1957 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, 1958 data_page); 1959 else if (flags & MS_BIND) 1960 retval = do_loopback(&path, dev_name, flags & MS_REC); 1961 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 1962 retval = do_change_type(&path, flags); 1963 else if (flags & MS_MOVE) 1964 retval = do_move_mount(&path, dev_name); 1965 else 1966 retval = do_new_mount(&path, type_page, flags, mnt_flags, 1967 dev_name, data_page); 1968 dput_out: 1969 path_put(&path); 1970 return retval; 1971 } 1972 1973 /* 1974 * Allocate a new namespace structure and populate it with contents 1975 * copied from the namespace of the passed in task structure. 1976 */ 1977 static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, 1978 struct fs_struct *fs) 1979 { 1980 struct mnt_namespace *new_ns; 1981 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; 1982 struct vfsmount *p, *q; 1983 1984 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL); 1985 if (!new_ns) 1986 return ERR_PTR(-ENOMEM); 1987 1988 atomic_set(&new_ns->count, 1); 1989 INIT_LIST_HEAD(&new_ns->list); 1990 init_waitqueue_head(&new_ns->poll); 1991 new_ns->event = 0; 1992 1993 down_write(&namespace_sem); 1994 /* First pass: copy the tree topology */ 1995 new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root, 1996 CL_COPY_ALL | CL_EXPIRE); 1997 if (!new_ns->root) { 1998 up_write(&namespace_sem); 1999 kfree(new_ns); 2000 return ERR_PTR(-ENOMEM); 2001 } 2002 spin_lock(&vfsmount_lock); 2003 list_add_tail(&new_ns->list, &new_ns->root->mnt_list); 2004 spin_unlock(&vfsmount_lock); 2005 2006 /* 2007 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts 2008 * as belonging to new namespace. We have already acquired a private 2009 * fs_struct, so tsk->fs->lock is not needed. 2010 */ 2011 p = mnt_ns->root; 2012 q = new_ns->root; 2013 while (p) { 2014 q->mnt_ns = new_ns; 2015 if (fs) { 2016 if (p == fs->root.mnt) { 2017 rootmnt = p; 2018 fs->root.mnt = mntget(q); 2019 } 2020 if (p == fs->pwd.mnt) { 2021 pwdmnt = p; 2022 fs->pwd.mnt = mntget(q); 2023 } 2024 } 2025 p = next_mnt(p, mnt_ns->root); 2026 q = next_mnt(q, new_ns->root); 2027 } 2028 up_write(&namespace_sem); 2029 2030 if (rootmnt) 2031 mntput(rootmnt); 2032 if (pwdmnt) 2033 mntput(pwdmnt); 2034 2035 return new_ns; 2036 } 2037 2038 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, 2039 struct fs_struct *new_fs) 2040 { 2041 struct mnt_namespace *new_ns; 2042 2043 BUG_ON(!ns); 2044 get_mnt_ns(ns); 2045 2046 if (!(flags & CLONE_NEWNS)) 2047 return ns; 2048 2049 new_ns = dup_mnt_ns(ns, new_fs); 2050 2051 put_mnt_ns(ns); 2052 return new_ns; 2053 } 2054 2055 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 2056 char __user *, type, unsigned long, flags, void __user *, data) 2057 { 2058 int retval; 2059 unsigned long data_page; 2060 unsigned long type_page; 2061 unsigned long dev_page; 2062 char *dir_page; 2063 2064 retval = copy_mount_options(type, &type_page); 2065 if (retval < 0) 2066 return retval; 2067 2068 dir_page = getname(dir_name); 2069 retval = PTR_ERR(dir_page); 2070 if (IS_ERR(dir_page)) 2071 goto out1; 2072 2073 retval = copy_mount_options(dev_name, &dev_page); 2074 if (retval < 0) 2075 goto out2; 2076 2077 retval = copy_mount_options(data, &data_page); 2078 if (retval < 0) 2079 goto out3; 2080 2081 lock_kernel(); 2082 retval = do_mount((char *)dev_page, dir_page, (char *)type_page, 2083 flags, (void *)data_page); 2084 unlock_kernel(); 2085 free_page(data_page); 2086 2087 out3: 2088 free_page(dev_page); 2089 out2: 2090 putname(dir_page); 2091 out1: 2092 free_page(type_page); 2093 return retval; 2094 } 2095 2096 /* 2097 * pivot_root Semantics: 2098 * Moves the root file system of the current process to the directory put_old, 2099 * makes new_root as the new root file system of the current process, and sets 2100 * root/cwd of all processes which had them on the current root to new_root. 2101 * 2102 * Restrictions: 2103 * The new_root and put_old must be directories, and must not be on the 2104 * same file system as the current process root. The put_old must be 2105 * underneath new_root, i.e. adding a non-zero number of /.. to the string 2106 * pointed to by put_old must yield the same directory as new_root. No other 2107 * file system may be mounted on put_old. After all, new_root is a mountpoint. 2108 * 2109 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. 2110 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives 2111 * in this situation. 2112 * 2113 * Notes: 2114 * - we don't move root/cwd if they are not at the root (reason: if something 2115 * cared enough to change them, it's probably wrong to force them elsewhere) 2116 * - it's okay to pick a root that isn't the root of a file system, e.g. 2117 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, 2118 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 2119 * first. 2120 */ 2121 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 2122 const char __user *, put_old) 2123 { 2124 struct vfsmount *tmp; 2125 struct path new, old, parent_path, root_parent, root; 2126 int error; 2127 2128 if (!capable(CAP_SYS_ADMIN)) 2129 return -EPERM; 2130 2131 error = user_path_dir(new_root, &new); 2132 if (error) 2133 goto out0; 2134 error = -EINVAL; 2135 if (!check_mnt(new.mnt)) 2136 goto out1; 2137 2138 error = user_path_dir(put_old, &old); 2139 if (error) 2140 goto out1; 2141 2142 error = security_sb_pivotroot(&old, &new); 2143 if (error) { 2144 path_put(&old); 2145 goto out1; 2146 } 2147 2148 read_lock(¤t->fs->lock); 2149 root = current->fs->root; 2150 path_get(¤t->fs->root); 2151 read_unlock(¤t->fs->lock); 2152 down_write(&namespace_sem); 2153 mutex_lock(&old.dentry->d_inode->i_mutex); 2154 error = -EINVAL; 2155 if (IS_MNT_SHARED(old.mnt) || 2156 IS_MNT_SHARED(new.mnt->mnt_parent) || 2157 IS_MNT_SHARED(root.mnt->mnt_parent)) 2158 goto out2; 2159 if (!check_mnt(root.mnt)) 2160 goto out2; 2161 error = -ENOENT; 2162 if (IS_DEADDIR(new.dentry->d_inode)) 2163 goto out2; 2164 if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry)) 2165 goto out2; 2166 if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry)) 2167 goto out2; 2168 error = -EBUSY; 2169 if (new.mnt == root.mnt || 2170 old.mnt == root.mnt) 2171 goto out2; /* loop, on the same file system */ 2172 error = -EINVAL; 2173 if (root.mnt->mnt_root != root.dentry) 2174 goto out2; /* not a mountpoint */ 2175 if (root.mnt->mnt_parent == root.mnt) 2176 goto out2; /* not attached */ 2177 if (new.mnt->mnt_root != new.dentry) 2178 goto out2; /* not a mountpoint */ 2179 if (new.mnt->mnt_parent == new.mnt) 2180 goto out2; /* not attached */ 2181 /* make sure we can reach put_old from new_root */ 2182 tmp = old.mnt; 2183 spin_lock(&vfsmount_lock); 2184 if (tmp != new.mnt) { 2185 for (;;) { 2186 if (tmp->mnt_parent == tmp) 2187 goto out3; /* already mounted on put_old */ 2188 if (tmp->mnt_parent == new.mnt) 2189 break; 2190 tmp = tmp->mnt_parent; 2191 } 2192 if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) 2193 goto out3; 2194 } else if (!is_subdir(old.dentry, new.dentry)) 2195 goto out3; 2196 detach_mnt(new.mnt, &parent_path); 2197 detach_mnt(root.mnt, &root_parent); 2198 /* mount old root on put_old */ 2199 attach_mnt(root.mnt, &old); 2200 /* mount new_root on / */ 2201 attach_mnt(new.mnt, &root_parent); 2202 touch_mnt_namespace(current->nsproxy->mnt_ns); 2203 spin_unlock(&vfsmount_lock); 2204 chroot_fs_refs(&root, &new); 2205 security_sb_post_pivotroot(&root, &new); 2206 error = 0; 2207 path_put(&root_parent); 2208 path_put(&parent_path); 2209 out2: 2210 mutex_unlock(&old.dentry->d_inode->i_mutex); 2211 up_write(&namespace_sem); 2212 path_put(&root); 2213 path_put(&old); 2214 out1: 2215 path_put(&new); 2216 out0: 2217 return error; 2218 out3: 2219 spin_unlock(&vfsmount_lock); 2220 goto out2; 2221 } 2222 2223 static void __init init_mount_tree(void) 2224 { 2225 struct vfsmount *mnt; 2226 struct mnt_namespace *ns; 2227 struct path root; 2228 2229 mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); 2230 if (IS_ERR(mnt)) 2231 panic("Can't create rootfs"); 2232 ns = kmalloc(sizeof(*ns), GFP_KERNEL); 2233 if (!ns) 2234 panic("Can't allocate initial namespace"); 2235 atomic_set(&ns->count, 1); 2236 INIT_LIST_HEAD(&ns->list); 2237 init_waitqueue_head(&ns->poll); 2238 ns->event = 0; 2239 list_add(&mnt->mnt_list, &ns->list); 2240 ns->root = mnt; 2241 mnt->mnt_ns = ns; 2242 2243 init_task.nsproxy->mnt_ns = ns; 2244 get_mnt_ns(ns); 2245 2246 root.mnt = ns->root; 2247 root.dentry = ns->root->mnt_root; 2248 2249 set_fs_pwd(current->fs, &root); 2250 set_fs_root(current->fs, &root); 2251 } 2252 2253 void __init mnt_init(void) 2254 { 2255 unsigned u; 2256 int err; 2257 2258 init_rwsem(&namespace_sem); 2259 2260 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), 2261 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 2262 2263 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); 2264 2265 if (!mount_hashtable) 2266 panic("Failed to allocate mount hash table\n"); 2267 2268 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE); 2269 2270 for (u = 0; u < HASH_SIZE; u++) 2271 INIT_LIST_HEAD(&mount_hashtable[u]); 2272 2273 err = sysfs_init(); 2274 if (err) 2275 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 2276 __func__, err); 2277 fs_kobj = kobject_create_and_add("fs", NULL); 2278 if (!fs_kobj) 2279 printk(KERN_WARNING "%s: kobj create error\n", __func__); 2280 init_rootfs(); 2281 init_mount_tree(); 2282 } 2283 2284 void __put_mnt_ns(struct mnt_namespace *ns) 2285 { 2286 struct vfsmount *root = ns->root; 2287 LIST_HEAD(umount_list); 2288 ns->root = NULL; 2289 spin_unlock(&vfsmount_lock); 2290 down_write(&namespace_sem); 2291 spin_lock(&vfsmount_lock); 2292 umount_tree(root, 0, &umount_list); 2293 spin_unlock(&vfsmount_lock); 2294 up_write(&namespace_sem); 2295 release_mounts(&umount_list); 2296 kfree(ns); 2297 } 2298