// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>

#include "pnode.h"
#include "internal.h"

/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */

struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
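/*
 * Editor's illustrative sketch (not part of the original file): readers of
 * mount state pair read_seqbegin()/read_seqretry() on mount_lock and retry
 * if a writer (lock_mount_hash()) intervened, exactly as lookup_mnt() does
 * further down. example_mnt_attached() is a hypothetical helper.
 */
static __maybe_unused bool example_mnt_attached(struct mount *mnt)
{
	unsigned seq;
	bool attached;

	do {
		seq = read_seqbegin(&mount_lock);
		attached = mnt_has_parent(mnt);
	} while (read_seqretry(&mount_lock, seq));

	return attached;
}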
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}

static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like __mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with __mnt_drop_write_file.
 */
int __mnt_want_write_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return __mnt_want_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount to take a write on
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
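/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * VFS caller brackets a write with mnt_want_write()/mnt_drop_write().
 * example_prepare_write() is a hypothetical helper; the actual write in
 * the middle is elided.
 */
static __maybe_unused int example_prepare_write(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* freeze protection + r/o check */
	if (err)
		return err;			/* e.g. -EROFS on a read-only mount */
	/* ... modify the filesystem here ... */
	mnt_drop_write(path->mnt);		/* allow freezing/r/o remount again */
	return 0;
}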
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}

/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
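/*
 * Editor's illustrative sketch (not part of the original file): any flag
 * change that writers must not observe half-done follows the same
 * hold/unhold protocol as mnt_make_readonly() above, under
 * lock_mount_hash(). example_set_mnt_flag() is a hypothetical helper.
 */
static __maybe_unused int example_set_mnt_flag(struct mount *mnt, int flag)
{
	int err;

	lock_mount_hash();
	err = mnt_hold_writers(mnt);	/* -EBUSY if writers are active */
	if (!err)
		mnt->mnt.mnt_flags |= flag;
	mnt_unhold_writers(mnt);	/* always clear MNT_WRITE_HOLD */
	unlock_mount_hash();
	return err;
}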
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();	// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}

/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
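/*
 * Editor's illustrative sketch (not part of the original file): callers own
 * the reference lookup_mnt() returns and must drop it with mntput().
 * example_path_is_covered() is a hypothetical helper.
 */
static __maybe_unused bool example_path_is_covered(const struct path *path)
{
	struct vfsmount *m = lookup_mnt(path);

	if (!m)
		return false;
	mntput(m);	/* drop the reference lookup_mnt() took */
	return true;
}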
static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace, not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
		       struct mount *parent,
		       struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}

void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb = fc->root->d_sb;
	mnt->mnt.mnt_root = dget(fc->root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);

struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);

struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
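/*
 * Editor's illustrative sketch (not part of the original file): how an
 * in-kernel user might mount tmpfs internally. example_mount_tmpfs() is
 * hypothetical; get_fs_type()/put_filesystem() are the lookup helpers the
 * real callers in this file (e.g. do_new_mount()) use.
 */
static __maybe_unused struct vfsmount *example_mount_tmpfs(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct vfsmount *mnt;

	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, 0, "tmpfs", NULL);
	put_filesystem(type);	/* vfs_kern_mount() took its own references */
	return mnt;
}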
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
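/*
 * Editor's illustrative sketch (not part of the original file): a subsystem
 * that stashes a vfsmount beyond the lifetime of the path it came from
 * takes its own reference with mntget() and balances it with mntput().
 * struct example_cache and both helpers are hypothetical.
 */
struct example_cache {	/* hypothetical */
	struct vfsmount *mnt;
};

static __maybe_unused void example_cache_set(struct example_cache *c,
					     struct vfsmount *mnt)
{
	c->mnt = mntget(mnt);
}

static __maybe_unused void example_cache_clear(struct example_cache *c)
{
	mntput(c->mnt);
	c->mnt = NULL;
}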
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);

struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

#ifdef CONFIG_PROC_FS
static struct mount *mnt_list_next(struct mnt_namespace *ns,
				   struct list_head *p)
{
	struct mount *mnt, *ret = NULL;

	lock_ns_list(ns);
	list_for_each_continue(p, &ns->list) {
		mnt = list_entry(p, typeof(*mnt), mnt_list);
		if (!mnt_is_cursor(mnt)) {
			ret = mnt;
			break;
		}
	}
	unlock_ns_list(ns);

	return ret;
}

/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct list_head *prev;

	down_read(&namespace_sem);
	if (!*pos) {
		prev = &p->ns->list;
	} else {
		prev = &p->cursor.mnt_list;

		/* Read after we'd reached the end? */
		if (list_empty(prev))
			return NULL;
	}

	return mnt_list_next(p->ns, prev);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	++*pos;
	return mnt_list_next(p->ns, &mnt->mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	lock_ns_list(p->ns);
	if (mnt)
		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
	else
		list_del_init(&p->cursor.mnt_list);
	unlock_ns_list(p->ns);
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};

void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
{
	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_del(&cursor->mnt_list);
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}
#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected?
	 */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
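/*
 * Editor's illustrative sketch (not part of the original file): the
 * canonical teardown order used throughout this file. namespace_sem is
 * taken first, then mount_lock; namespace_unlock() then reaps the
 * "unmounted" list after an RCU delay, with no locks held.
 * example_detach_tree() is a hypothetical wrapper; compare
 * drop_collected_mounts() and do_umount() below.
 */
static __maybe_unused void example_detach_tree(struct mount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(mnt, UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}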
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}

static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}

static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
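/*
 * Editor's illustrative sketch (not part of the original file): from
 * userspace, MNT_EXPIRE gives automounters two-phase expiry. The first
 * umount2() sets the expiry mark and fails with EAGAIN; if nothing uses
 * the mount in the meantime, a second call actually unmounts it:
 *
 *	if (umount2("/mnt/auto/music", MNT_EXPIRE) == -1 &&
 *	    errno == EAGAIN) {
 *		// mark set; try again on the next expiry pass
 *	}
 */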
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
			int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
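/*
 * Editor's illustrative sketch (not part of the original file): a caller
 * such as the audit subsystem snapshots a subtree with collect_mounts()
 * and must release the private copy with drop_collected_mounts().
 * example_snapshot_subtree() is a hypothetical helper.
 */
static __maybe_unused int example_snapshot_subtree(const struct path *path)
{
	struct vfsmount *snapshot = collect_mounts(path);

	if (IS_ERR(snapshot))
		return PTR_ERR(snapshot);
	/* ... walk the detached copy, e.g. with iterate_mounts() ... */
	drop_collected_mounts(snapshot);
	return 0;
}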
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;

invalid:
	up_read(&namespace_sem);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->mounts >= max)
		return -ENOSPC;
	max -= ns->mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}
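/*
 * Editor's note (illustrative, not in the original file): count_mounts()
 * only reserves room against sysctl_mount_max via ns->pending_mounts.
 * The reservation is committed by commit_tree(), which folds
 * pending_mounts into ns->mounts, or rolled back by resetting
 * pending_mounts on the error paths of attach_recursive_mnt() below.
 */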
2131 * (done when source_mnt is moved)
2132 *
2133 * NOTE: the table below explains the semantics when a source mount
2134 * of a given type is attached to a destination mount of a given type.
2135 * ---------------------------------------------------------------------------
2136 * | BIND MOUNT OPERATION |
2137 * |**************************************************************************
2138 * | source-->| shared | private | slave | unbindable |
2139 * | dest | | | | |
2140 * | | | | | | |
2141 * | v | | | | |
2142 * |**************************************************************************
2143 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
2144 * | | | | | |
2145 * |non-shared| shared (+) | private | slave (*) | invalid |
2146 * ***************************************************************************
2147 * A bind operation clones the source mount and mounts the clone on the
2148 * destination mount.
2149 *
2150 * (++) the cloned mount is propagated to all the mounts in the propagation
2151 * tree of the destination mount and the cloned mount is added to
2152 * the peer group of the source mount.
2153 * (+) the cloned mount is created under the destination mount and is marked
2154 * as shared. The cloned mount is added to the peer group of the source
2155 * mount.
2156 * (+++) the mount is propagated to all the mounts in the propagation tree
2157 * of the destination mount and the cloned mount is made slave
2158 * of the same master as that of the source mount. The cloned mount
2159 * is marked as 'shared and slave'.
2160 * (*) the cloned mount is made a slave of the same master as that of the
2161 * source mount.
2162 *
2163 * ---------------------------------------------------------------------------
2164 * | MOVE MOUNT OPERATION |
2165 * |**************************************************************************
2166 * | source-->| shared | private | slave | unbindable |
2167 * | dest | | | | |
2168 * | | | | | | |
2169 * | v | | | | |
2170 * |**************************************************************************
2171 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
2172 * | | | | | |
2173 * |non-shared| shared (+*) | private | slave (*) | unbindable |
2174 * ***************************************************************************
2175 *
2176 * (+) the mount is moved to the destination and is then propagated to
2177 * all the mounts in the propagation tree of the destination mount.
2178 * (+*) the mount is moved to the destination.
2179 * (+++) the mount is moved to the destination and is then propagated to
2180 * all the mounts belonging to the destination mount's propagation tree.
2181 * The mount is marked as 'shared and slave'.
2182 * (*) the mount continues to be a slave at the new location.
2183 *
2184 * if the source mount is a tree, the operations explained above are
2185 * applied to each mount in the tree.
2186 * Must be called without spinlocks held, since this function can sleep
2187 * in allocations.
2188 */
2189 static int attach_recursive_mnt(struct mount *source_mnt,
2190 struct mount *dest_mnt,
2191 struct mountpoint *dest_mp,
2192 bool moving)
2193 {
2194 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2195 HLIST_HEAD(tree_list);
2196 struct mnt_namespace *ns = dest_mnt->mnt_ns;
2197 struct mountpoint *smp;
2198 struct mount *child, *p;
2199 struct hlist_node *n;
2200 int err;
2201
2202 /* Preallocate a mountpoint in case the new mounts need
2203 * to be tucked under other mounts.
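 * ("Tucking": if propagation creates a copy at a mountpoint that already
 * has something mounted on it, the copy is slipped in beneath - the mount
 * that was already there is re-attached on top of the copy, at the copy's
 * own root, which is what the preallocated @smp is for; see
 * mnt_change_mountpoint() below.)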
2204 */ 2205 smp = get_mountpoint(source_mnt->mnt.mnt_root); 2206 if (IS_ERR(smp)) 2207 return PTR_ERR(smp); 2208 2209 /* Is there space to add these mounts to the mount namespace? */ 2210 if (!moving) { 2211 err = count_mounts(ns, source_mnt); 2212 if (err) 2213 goto out; 2214 } 2215 2216 if (IS_MNT_SHARED(dest_mnt)) { 2217 err = invent_group_ids(source_mnt, true); 2218 if (err) 2219 goto out; 2220 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 2221 lock_mount_hash(); 2222 if (err) 2223 goto out_cleanup_ids; 2224 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2225 set_mnt_shared(p); 2226 } else { 2227 lock_mount_hash(); 2228 } 2229 if (moving) { 2230 unhash_mnt(source_mnt); 2231 attach_mnt(source_mnt, dest_mnt, dest_mp); 2232 touch_mnt_namespace(source_mnt->mnt_ns); 2233 } else { 2234 if (source_mnt->mnt_ns) { 2235 /* move from anon - the caller will destroy */ 2236 list_del_init(&source_mnt->mnt_ns->list); 2237 } 2238 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 2239 commit_tree(source_mnt); 2240 } 2241 2242 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { 2243 struct mount *q; 2244 hlist_del_init(&child->mnt_hash); 2245 q = __lookup_mnt(&child->mnt_parent->mnt, 2246 child->mnt_mountpoint); 2247 if (q) 2248 mnt_change_mountpoint(child, smp, q); 2249 /* Notice when we are propagating across user namespaces */ 2250 if (child->mnt_parent->mnt_ns->user_ns != user_ns) 2251 lock_mnt_tree(child); 2252 child->mnt.mnt_flags &= ~MNT_LOCKED; 2253 commit_tree(child); 2254 } 2255 put_mountpoint(smp); 2256 unlock_mount_hash(); 2257 2258 return 0; 2259 2260 out_cleanup_ids: 2261 while (!hlist_empty(&tree_list)) { 2262 child = hlist_entry(tree_list.first, struct mount, mnt_hash); 2263 child->mnt_parent->mnt_ns->pending_mounts = 0; 2264 umount_tree(child, UMOUNT_SYNC); 2265 } 2266 unlock_mount_hash(); 2267 cleanup_group_ids(source_mnt, NULL); 2268 out: 2269 ns->pending_mounts = 0; 2270 2271 read_seqlock_excl(&mount_lock); 2272 put_mountpoint(smp); 2273 read_sequnlock_excl(&mount_lock); 2274 2275 return err; 2276 } 2277 2278 static struct mountpoint *lock_mount(struct path *path) 2279 { 2280 struct vfsmount *mnt; 2281 struct dentry *dentry = path->dentry; 2282 retry: 2283 inode_lock(dentry->d_inode); 2284 if (unlikely(cant_mount(dentry))) { 2285 inode_unlock(dentry->d_inode); 2286 return ERR_PTR(-ENOENT); 2287 } 2288 namespace_lock(); 2289 mnt = lookup_mnt(path); 2290 if (likely(!mnt)) { 2291 struct mountpoint *mp = get_mountpoint(dentry); 2292 if (IS_ERR(mp)) { 2293 namespace_unlock(); 2294 inode_unlock(dentry->d_inode); 2295 return mp; 2296 } 2297 return mp; 2298 } 2299 namespace_unlock(); 2300 inode_unlock(path->dentry->d_inode); 2301 path_put(path); 2302 path->mnt = mnt; 2303 dentry = path->dentry = dget(mnt->mnt_root); 2304 goto retry; 2305 } 2306 2307 static void unlock_mount(struct mountpoint *where) 2308 { 2309 struct dentry *dentry = where->m_dentry; 2310 2311 read_seqlock_excl(&mount_lock); 2312 put_mountpoint(where); 2313 read_sequnlock_excl(&mount_lock); 2314 2315 namespace_unlock(); 2316 inode_unlock(dentry->d_inode); 2317 } 2318 2319 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) 2320 { 2321 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) 2322 return -EINVAL; 2323 2324 if (d_is_dir(mp->m_dentry) != 2325 d_is_dir(mnt->mnt.mnt_root)) 2326 return -ENOTDIR; 2327 2328 return attach_recursive_mnt(mnt, p, mp, false); 2329 } 2330 2331 /* 2332 * Sanity check the flags to change_mnt_propagation. 
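 * e.g. MS_SHARED | MS_REC is accepted, while MS_SHARED | MS_PRIVATE (more
 * than one propagation type) or MS_SHARED | MS_RDONLY (a non-propagation
 * flag besides MS_REC/MS_SILENT) cause 0 to be returned.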
2333 */ 2334 2335 static int flags_to_propagation_type(int ms_flags) 2336 { 2337 int type = ms_flags & ~(MS_REC | MS_SILENT); 2338 2339 /* Fail if any non-propagation flags are set */ 2340 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2341 return 0; 2342 /* Only one propagation flag should be set */ 2343 if (!is_power_of_2(type)) 2344 return 0; 2345 return type; 2346 } 2347 2348 /* 2349 * recursively change the type of the mountpoint. 2350 */ 2351 static int do_change_type(struct path *path, int ms_flags) 2352 { 2353 struct mount *m; 2354 struct mount *mnt = real_mount(path->mnt); 2355 int recurse = ms_flags & MS_REC; 2356 int type; 2357 int err = 0; 2358 2359 if (path->dentry != path->mnt->mnt_root) 2360 return -EINVAL; 2361 2362 type = flags_to_propagation_type(ms_flags); 2363 if (!type) 2364 return -EINVAL; 2365 2366 namespace_lock(); 2367 if (type == MS_SHARED) { 2368 err = invent_group_ids(mnt, recurse); 2369 if (err) 2370 goto out_unlock; 2371 } 2372 2373 lock_mount_hash(); 2374 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 2375 change_mnt_propagation(m, type); 2376 unlock_mount_hash(); 2377 2378 out_unlock: 2379 namespace_unlock(); 2380 return err; 2381 } 2382 2383 static struct mount *__do_loopback(struct path *old_path, int recurse) 2384 { 2385 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); 2386 2387 if (IS_MNT_UNBINDABLE(old)) 2388 return mnt; 2389 2390 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations) 2391 return mnt; 2392 2393 if (!recurse && has_locked_children(old, old_path->dentry)) 2394 return mnt; 2395 2396 if (recurse) 2397 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); 2398 else 2399 mnt = clone_mnt(old, old_path->dentry, 0); 2400 2401 if (!IS_ERR(mnt)) 2402 mnt->mnt.mnt_flags &= ~MNT_LOCKED; 2403 2404 return mnt; 2405 } 2406 2407 /* 2408 * do loopback mount. 
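 * This is the MS_BIND path of mount(2), e.g.
 *
 *	mount("/src", "/dst", NULL, MS_BIND, NULL);
 *
 * or, with MS_REC added (mount --rbind), the entire tree under /src.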
2409 */ 2410 static int do_loopback(struct path *path, const char *old_name, 2411 int recurse) 2412 { 2413 struct path old_path; 2414 struct mount *mnt = NULL, *parent; 2415 struct mountpoint *mp; 2416 int err; 2417 if (!old_name || !*old_name) 2418 return -EINVAL; 2419 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 2420 if (err) 2421 return err; 2422 2423 err = -EINVAL; 2424 if (mnt_ns_loop(old_path.dentry)) 2425 goto out; 2426 2427 mp = lock_mount(path); 2428 if (IS_ERR(mp)) { 2429 err = PTR_ERR(mp); 2430 goto out; 2431 } 2432 2433 parent = real_mount(path->mnt); 2434 if (!check_mnt(parent)) 2435 goto out2; 2436 2437 mnt = __do_loopback(&old_path, recurse); 2438 if (IS_ERR(mnt)) { 2439 err = PTR_ERR(mnt); 2440 goto out2; 2441 } 2442 2443 err = graft_tree(mnt, parent, mp); 2444 if (err) { 2445 lock_mount_hash(); 2446 umount_tree(mnt, UMOUNT_SYNC); 2447 unlock_mount_hash(); 2448 } 2449 out2: 2450 unlock_mount(mp); 2451 out: 2452 path_put(&old_path); 2453 return err; 2454 } 2455 2456 static struct file *open_detached_copy(struct path *path, bool recursive) 2457 { 2458 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2459 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true); 2460 struct mount *mnt, *p; 2461 struct file *file; 2462 2463 if (IS_ERR(ns)) 2464 return ERR_CAST(ns); 2465 2466 namespace_lock(); 2467 mnt = __do_loopback(path, recursive); 2468 if (IS_ERR(mnt)) { 2469 namespace_unlock(); 2470 free_mnt_ns(ns); 2471 return ERR_CAST(mnt); 2472 } 2473 2474 lock_mount_hash(); 2475 for (p = mnt; p; p = next_mnt(p, mnt)) { 2476 p->mnt_ns = ns; 2477 ns->mounts++; 2478 } 2479 ns->root = mnt; 2480 list_add_tail(&ns->list, &mnt->mnt_list); 2481 mntget(&mnt->mnt); 2482 unlock_mount_hash(); 2483 namespace_unlock(); 2484 2485 mntput(path->mnt); 2486 path->mnt = &mnt->mnt; 2487 file = dentry_open(path, O_PATH, current_cred()); 2488 if (IS_ERR(file)) 2489 dissolve_on_fput(path->mnt); 2490 else 2491 file->f_mode |= FMODE_NEED_UNMOUNT; 2492 return file; 2493 } 2494 2495 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 2496 { 2497 struct file *file; 2498 struct path path; 2499 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 2500 bool detached = flags & OPEN_TREE_CLONE; 2501 int error; 2502 int fd; 2503 2504 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); 2505 2506 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | 2507 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | 2508 OPEN_TREE_CLOEXEC)) 2509 return -EINVAL; 2510 2511 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) 2512 return -EINVAL; 2513 2514 if (flags & AT_NO_AUTOMOUNT) 2515 lookup_flags &= ~LOOKUP_AUTOMOUNT; 2516 if (flags & AT_SYMLINK_NOFOLLOW) 2517 lookup_flags &= ~LOOKUP_FOLLOW; 2518 if (flags & AT_EMPTY_PATH) 2519 lookup_flags |= LOOKUP_EMPTY; 2520 2521 if (detached && !may_mount()) 2522 return -EPERM; 2523 2524 fd = get_unused_fd_flags(flags & O_CLOEXEC); 2525 if (fd < 0) 2526 return fd; 2527 2528 error = user_path_at(dfd, filename, lookup_flags, &path); 2529 if (unlikely(error)) { 2530 file = ERR_PTR(error); 2531 } else { 2532 if (detached) 2533 file = open_detached_copy(&path, flags & AT_RECURSIVE); 2534 else 2535 file = dentry_open(&path, O_PATH, current_cred()); 2536 path_put(&path); 2537 } 2538 if (IS_ERR(file)) { 2539 put_unused_fd(fd); 2540 return PTR_ERR(file); 2541 } 2542 fd_install(fd, file); 2543 return fd; 2544 } 2545 2546 /* 2547 * Don't allow locked mount flags to be cleared. 
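 * (MNT_LOCK_* flags are set by lock_mnt_tree() when a tree is copied into
 * a less privileged mount namespace, so e.g. a mount that arrived there
 * nosuid cannot be remounted suid from inside that namespace.)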
2548 * 2549 * No locks need to be held here while testing the various MNT_LOCK 2550 * flags because those flags can never be cleared once they are set. 2551 */ 2552 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) 2553 { 2554 unsigned int fl = mnt->mnt.mnt_flags; 2555 2556 if ((fl & MNT_LOCK_READONLY) && 2557 !(mnt_flags & MNT_READONLY)) 2558 return false; 2559 2560 if ((fl & MNT_LOCK_NODEV) && 2561 !(mnt_flags & MNT_NODEV)) 2562 return false; 2563 2564 if ((fl & MNT_LOCK_NOSUID) && 2565 !(mnt_flags & MNT_NOSUID)) 2566 return false; 2567 2568 if ((fl & MNT_LOCK_NOEXEC) && 2569 !(mnt_flags & MNT_NOEXEC)) 2570 return false; 2571 2572 if ((fl & MNT_LOCK_ATIME) && 2573 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) 2574 return false; 2575 2576 return true; 2577 } 2578 2579 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) 2580 { 2581 bool readonly_request = (mnt_flags & MNT_READONLY); 2582 2583 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) 2584 return 0; 2585 2586 if (readonly_request) 2587 return mnt_make_readonly(mnt); 2588 2589 mnt->mnt.mnt_flags &= ~MNT_READONLY; 2590 return 0; 2591 } 2592 2593 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) 2594 { 2595 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; 2596 mnt->mnt.mnt_flags = mnt_flags; 2597 touch_mnt_namespace(mnt->mnt_ns); 2598 } 2599 2600 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) 2601 { 2602 struct super_block *sb = mnt->mnt_sb; 2603 2604 if (!__mnt_is_readonly(mnt) && 2605 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && 2606 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { 2607 char *buf = (char *)__get_free_page(GFP_KERNEL); 2608 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); 2609 struct tm tm; 2610 2611 time64_to_tm(sb->s_time_max, 0, &tm); 2612 2613 pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n", 2614 sb->s_type->name, 2615 is_mounted(mnt) ? "remounted" : "mounted", 2616 mntpath, 2617 tm.tm_year+1900, (unsigned long long)sb->s_time_max); 2618 2619 free_page((unsigned long)buf); 2620 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; 2621 } 2622 } 2623 2624 /* 2625 * Handle reconfiguration of the mountpoint only without alteration of the 2626 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND 2627 * to mount(2). 2628 */ 2629 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) 2630 { 2631 struct super_block *sb = path->mnt->mnt_sb; 2632 struct mount *mnt = real_mount(path->mnt); 2633 int ret; 2634 2635 if (!check_mnt(mnt)) 2636 return -EINVAL; 2637 2638 if (path->dentry != mnt->mnt.mnt_root) 2639 return -EINVAL; 2640 2641 if (!can_change_locked_flags(mnt, mnt_flags)) 2642 return -EPERM; 2643 2644 /* 2645 * We're only checking whether the superblock is read-only not 2646 * changing it, so only take down_read(&sb->s_umount). 2647 */ 2648 down_read(&sb->s_umount); 2649 lock_mount_hash(); 2650 ret = change_mount_ro_state(mnt, mnt_flags); 2651 if (ret == 0) 2652 set_mount_attributes(mnt, mnt_flags); 2653 unlock_mount_hash(); 2654 up_read(&sb->s_umount); 2655 2656 mnt_warn_timestamp_expiry(path, &mnt->mnt); 2657 2658 return ret; 2659 } 2660 2661 /* 2662 * change filesystem flags. dir should be a physical root of filesystem. 2663 * If you've mounted a non-root directory somewhere and want to do remount 2664 * on it - tough luck. 
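 * e.g. mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL) reconfigures
 * the underlying superblock, so the read-only change is observed through
 * every mount of that filesystem.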
2665 */ 2666 static int do_remount(struct path *path, int ms_flags, int sb_flags, 2667 int mnt_flags, void *data) 2668 { 2669 int err; 2670 struct super_block *sb = path->mnt->mnt_sb; 2671 struct mount *mnt = real_mount(path->mnt); 2672 struct fs_context *fc; 2673 2674 if (!check_mnt(mnt)) 2675 return -EINVAL; 2676 2677 if (path->dentry != path->mnt->mnt_root) 2678 return -EINVAL; 2679 2680 if (!can_change_locked_flags(mnt, mnt_flags)) 2681 return -EPERM; 2682 2683 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); 2684 if (IS_ERR(fc)) 2685 return PTR_ERR(fc); 2686 2687 fc->oldapi = true; 2688 err = parse_monolithic_mount_data(fc, data); 2689 if (!err) { 2690 down_write(&sb->s_umount); 2691 err = -EPERM; 2692 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { 2693 err = reconfigure_super(fc); 2694 if (!err) { 2695 lock_mount_hash(); 2696 set_mount_attributes(mnt, mnt_flags); 2697 unlock_mount_hash(); 2698 } 2699 } 2700 up_write(&sb->s_umount); 2701 } 2702 2703 mnt_warn_timestamp_expiry(path, &mnt->mnt); 2704 2705 put_fs_context(fc); 2706 return err; 2707 } 2708 2709 static inline int tree_contains_unbindable(struct mount *mnt) 2710 { 2711 struct mount *p; 2712 for (p = mnt; p; p = next_mnt(p, mnt)) { 2713 if (IS_MNT_UNBINDABLE(p)) 2714 return 1; 2715 } 2716 return 0; 2717 } 2718 2719 /* 2720 * Check that there aren't references to earlier/same mount namespaces in the 2721 * specified subtree. Such references can act as pins for mount namespaces 2722 * that aren't checked by the mount-cycle checking code, thereby allowing 2723 * cycles to be made. 2724 */ 2725 static bool check_for_nsfs_mounts(struct mount *subtree) 2726 { 2727 struct mount *p; 2728 bool ret = false; 2729 2730 lock_mount_hash(); 2731 for (p = subtree; p; p = next_mnt(p, subtree)) 2732 if (mnt_ns_loop(p->mnt.mnt_root)) 2733 goto out; 2734 2735 ret = true; 2736 out: 2737 unlock_mount_hash(); 2738 return ret; 2739 } 2740 2741 static int do_set_group(struct path *from_path, struct path *to_path) 2742 { 2743 struct mount *from, *to; 2744 int err; 2745 2746 from = real_mount(from_path->mnt); 2747 to = real_mount(to_path->mnt); 2748 2749 namespace_lock(); 2750 2751 err = -EINVAL; 2752 /* To and From must be mounted */ 2753 if (!is_mounted(&from->mnt)) 2754 goto out; 2755 if (!is_mounted(&to->mnt)) 2756 goto out; 2757 2758 err = -EPERM; 2759 /* We should be allowed to modify mount namespaces of both mounts */ 2760 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2761 goto out; 2762 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2763 goto out; 2764 2765 err = -EINVAL; 2766 /* To and From paths should be mount roots */ 2767 if (from_path->dentry != from_path->mnt->mnt_root) 2768 goto out; 2769 if (to_path->dentry != to_path->mnt->mnt_root) 2770 goto out; 2771 2772 /* Setting sharing groups is only allowed across same superblock */ 2773 if (from->mnt.mnt_sb != to->mnt.mnt_sb) 2774 goto out; 2775 2776 /* From mount root should be wider than To mount root */ 2777 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) 2778 goto out; 2779 2780 /* From mount should not have locked children in place of To's root */ 2781 if (has_locked_children(from, to->mnt.mnt_root)) 2782 goto out; 2783 2784 /* Setting sharing groups is only allowed on private mounts */ 2785 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) 2786 goto out; 2787 2788 /* From should not be private */ 2789 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) 2790 goto out; 2791 2792 if (IS_MNT_SLAVE(from)) { 2793 struct mount *m = from->mnt_master; 2794 2795 
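/* hang @to off the same master, as a sibling slave of @from */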
list_add(&to->mnt_slave, &m->mnt_slave_list); 2796 to->mnt_master = m; 2797 } 2798 2799 if (IS_MNT_SHARED(from)) { 2800 to->mnt_group_id = from->mnt_group_id; 2801 list_add(&to->mnt_share, &from->mnt_share); 2802 lock_mount_hash(); 2803 set_mnt_shared(to); 2804 unlock_mount_hash(); 2805 } 2806 2807 err = 0; 2808 out: 2809 namespace_unlock(); 2810 return err; 2811 } 2812 2813 static int do_move_mount(struct path *old_path, struct path *new_path) 2814 { 2815 struct mnt_namespace *ns; 2816 struct mount *p; 2817 struct mount *old; 2818 struct mount *parent; 2819 struct mountpoint *mp, *old_mp; 2820 int err; 2821 bool attached; 2822 2823 mp = lock_mount(new_path); 2824 if (IS_ERR(mp)) 2825 return PTR_ERR(mp); 2826 2827 old = real_mount(old_path->mnt); 2828 p = real_mount(new_path->mnt); 2829 parent = old->mnt_parent; 2830 attached = mnt_has_parent(old); 2831 old_mp = old->mnt_mp; 2832 ns = old->mnt_ns; 2833 2834 err = -EINVAL; 2835 /* The mountpoint must be in our namespace. */ 2836 if (!check_mnt(p)) 2837 goto out; 2838 2839 /* The thing moved must be mounted... */ 2840 if (!is_mounted(&old->mnt)) 2841 goto out; 2842 2843 /* ... and either ours or the root of anon namespace */ 2844 if (!(attached ? check_mnt(old) : is_anon_ns(ns))) 2845 goto out; 2846 2847 if (old->mnt.mnt_flags & MNT_LOCKED) 2848 goto out; 2849 2850 if (old_path->dentry != old_path->mnt->mnt_root) 2851 goto out; 2852 2853 if (d_is_dir(new_path->dentry) != 2854 d_is_dir(old_path->dentry)) 2855 goto out; 2856 /* 2857 * Don't move a mount residing in a shared parent. 2858 */ 2859 if (attached && IS_MNT_SHARED(parent)) 2860 goto out; 2861 /* 2862 * Don't move a mount tree containing unbindable mounts to a destination 2863 * mount which is shared. 2864 */ 2865 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) 2866 goto out; 2867 err = -ELOOP; 2868 if (!check_for_nsfs_mounts(old)) 2869 goto out; 2870 for (; mnt_has_parent(p); p = p->mnt_parent) 2871 if (p == old) 2872 goto out; 2873 2874 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, 2875 attached); 2876 if (err) 2877 goto out; 2878 2879 /* if the mount is moved, it should no longer be expire 2880 * automatically */ 2881 list_del_init(&old->mnt_expire); 2882 if (attached) 2883 put_mountpoint(old_mp); 2884 out: 2885 unlock_mount(mp); 2886 if (!err) { 2887 if (attached) 2888 mntput_no_expire(parent); 2889 else 2890 free_mnt_ns(ns); 2891 } 2892 return err; 2893 } 2894 2895 static int do_move_mount_old(struct path *path, const char *old_name) 2896 { 2897 struct path old_path; 2898 int err; 2899 2900 if (!old_name || !*old_name) 2901 return -EINVAL; 2902 2903 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 2904 if (err) 2905 return err; 2906 2907 err = do_move_mount(&old_path, path); 2908 path_put(&old_path); 2909 return err; 2910 } 2911 2912 /* 2913 * add a mount into a namespace's mount tree 2914 */ 2915 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, 2916 const struct path *path, int mnt_flags) 2917 { 2918 struct mount *parent = real_mount(path->mnt); 2919 2920 mnt_flags &= ~MNT_INTERNAL_FLAGS; 2921 2922 if (unlikely(!check_mnt(parent))) { 2923 /* that's acceptable only for automounts done in private ns */ 2924 if (!(mnt_flags & MNT_SHRINKABLE)) 2925 return -EINVAL; 2926 /* ... 
and for those we'd better have mountpoint still alive */ 2927 if (!parent->mnt_ns) 2928 return -EINVAL; 2929 } 2930 2931 /* Refuse the same filesystem on the same mount point */ 2932 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && 2933 path->mnt->mnt_root == path->dentry) 2934 return -EBUSY; 2935 2936 if (d_is_symlink(newmnt->mnt.mnt_root)) 2937 return -EINVAL; 2938 2939 newmnt->mnt.mnt_flags = mnt_flags; 2940 return graft_tree(newmnt, parent, mp); 2941 } 2942 2943 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); 2944 2945 /* 2946 * Create a new mount using a superblock configuration and request it 2947 * be added to the namespace tree. 2948 */ 2949 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, 2950 unsigned int mnt_flags) 2951 { 2952 struct vfsmount *mnt; 2953 struct mountpoint *mp; 2954 struct super_block *sb = fc->root->d_sb; 2955 int error; 2956 2957 error = security_sb_kern_mount(sb); 2958 if (!error && mount_too_revealing(sb, &mnt_flags)) 2959 error = -EPERM; 2960 2961 if (unlikely(error)) { 2962 fc_drop_locked(fc); 2963 return error; 2964 } 2965 2966 up_write(&sb->s_umount); 2967 2968 mnt = vfs_create_mount(fc); 2969 if (IS_ERR(mnt)) 2970 return PTR_ERR(mnt); 2971 2972 mnt_warn_timestamp_expiry(mountpoint, mnt); 2973 2974 mp = lock_mount(mountpoint); 2975 if (IS_ERR(mp)) { 2976 mntput(mnt); 2977 return PTR_ERR(mp); 2978 } 2979 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags); 2980 unlock_mount(mp); 2981 if (error < 0) 2982 mntput(mnt); 2983 return error; 2984 } 2985 2986 /* 2987 * create a new mount for userspace and request it to be added into the 2988 * namespace's tree 2989 */ 2990 static int do_new_mount(struct path *path, const char *fstype, int sb_flags, 2991 int mnt_flags, const char *name, void *data) 2992 { 2993 struct file_system_type *type; 2994 struct fs_context *fc; 2995 const char *subtype = NULL; 2996 int err = 0; 2997 2998 if (!fstype) 2999 return -EINVAL; 3000 3001 type = get_fs_type(fstype); 3002 if (!type) 3003 return -ENODEV; 3004 3005 if (type->fs_flags & FS_HAS_SUBTYPE) { 3006 subtype = strchr(fstype, '.'); 3007 if (subtype) { 3008 subtype++; 3009 if (!*subtype) { 3010 put_filesystem(type); 3011 return -EINVAL; 3012 } 3013 } 3014 } 3015 3016 fc = fs_context_for_mount(type, sb_flags); 3017 put_filesystem(type); 3018 if (IS_ERR(fc)) 3019 return PTR_ERR(fc); 3020 3021 if (subtype) 3022 err = vfs_parse_fs_string(fc, "subtype", 3023 subtype, strlen(subtype)); 3024 if (!err && name) 3025 err = vfs_parse_fs_string(fc, "source", name, strlen(name)); 3026 if (!err) 3027 err = parse_monolithic_mount_data(fc, data); 3028 if (!err && !mount_capable(fc)) 3029 err = -EPERM; 3030 if (!err) 3031 err = vfs_get_tree(fc); 3032 if (!err) 3033 err = do_new_mount_fc(fc, path, mnt_flags); 3034 3035 put_fs_context(fc); 3036 return err; 3037 } 3038 3039 int finish_automount(struct vfsmount *m, const struct path *path) 3040 { 3041 struct dentry *dentry = path->dentry; 3042 struct mountpoint *mp; 3043 struct mount *mnt; 3044 int err; 3045 3046 if (!m) 3047 return 0; 3048 if (IS_ERR(m)) 3049 return PTR_ERR(m); 3050 3051 mnt = real_mount(m); 3052 /* The new mount record should have at least 2 refs to prevent it being 3053 * expired before we get a chance to add it 3054 */ 3055 BUG_ON(mnt_get_count(mnt) < 2); 3056 3057 if (m->mnt_sb == path->mnt->mnt_sb && 3058 m->mnt_root == dentry) { 3059 err = -ELOOP; 3060 goto discard; 3061 } 3062 3063 /* 3064 * we don't want to use lock_mount() - in this case finding 
something
3065 * that overmounts our mountpoint means "quietly drop what we've
3066 * got", not "try to mount it on top".
3067 */
3068 inode_lock(dentry->d_inode);
3069 namespace_lock();
3070 if (unlikely(cant_mount(dentry))) {
3071 err = -ENOENT;
3072 goto discard_locked;
3073 }
3074 rcu_read_lock();
3075 if (unlikely(__lookup_mnt(path->mnt, dentry))) {
3076 rcu_read_unlock();
3077 err = 0;
3078 goto discard_locked;
3079 }
3080 rcu_read_unlock();
3081 mp = get_mountpoint(dentry);
3082 if (IS_ERR(mp)) {
3083 err = PTR_ERR(mp);
3084 goto discard_locked;
3085 }
3086
3087 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3088 unlock_mount(mp);
3089 if (unlikely(err))
3090 goto discard;
3091 mntput(m);
3092 return 0;
3093
3094 discard_locked:
3095 namespace_unlock();
3096 inode_unlock(dentry->d_inode);
3097 discard:
3098 /* remove m from any expiration list it may be on */
3099 if (!list_empty(&mnt->mnt_expire)) {
3100 namespace_lock();
3101 list_del_init(&mnt->mnt_expire);
3102 namespace_unlock();
3103 }
3104 mntput(m);
3105 mntput(m);
3106 return err;
3107 }
3108
3109 /**
3110 * mnt_set_expiry - Put a mount on an expiration list
3111 * @mnt: The mount to list.
3112 * @expiry_list: The list to add the mount to.
3113 */
3114 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3115 {
3116 namespace_lock();
3117
3118 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3119
3120 namespace_unlock();
3121 }
3122 EXPORT_SYMBOL(mnt_set_expiry);
3123
3124 /*
3125 * process a list of expirable mountpoints with the intent of discarding any
3126 * mountpoints that aren't in use and haven't been touched since last we came
3127 * here
3128 */
3129 void mark_mounts_for_expiry(struct list_head *mounts)
3130 {
3131 struct mount *mnt, *next;
3132 LIST_HEAD(graveyard);
3133
3134 if (list_empty(mounts))
3135 return;
3136
3137 namespace_lock();
3138 lock_mount_hash();
3139
3140 /* extract from the expiration list every vfsmount that matches the
3141 * following criteria:
3142 * - only referenced by its parent vfsmount
3143 * - still marked for expiry (marked on the last call here; marks are
3144 * cleared by mntput())
3145 */
3146 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3147 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3148 propagate_mount_busy(mnt, 1))
3149 continue;
3150 list_move(&mnt->mnt_expire, &graveyard);
3151 }
3152 while (!list_empty(&graveyard)) {
3153 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3154 touch_mnt_namespace(mnt->mnt_ns);
3155 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3156 }
3157 unlock_mount_hash();
3158 namespace_unlock();
3159 }
3160
3161 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
3162
3163 /*
3164 * Ripoff of 'select_parent()'
3165 *
3166 * search the list of submounts for a given mountpoint, and move any
3167 * shrinkable submounts to the 'graveyard' list.
3168 */
3169 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3170 {
3171 struct mount *this_parent = parent;
3172 struct list_head *next;
3173 int found = 0;
3174
3175 repeat:
3176 next = this_parent->mnt_mounts.next;
3177 resume:
3178 while (next != &this_parent->mnt_mounts) {
3179 struct list_head *tmp = next;
3180 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3181
3182 next = tmp->next;
3183 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3184 continue;
3185 /*
3186 * Descend a level if the mnt_mounts list is non-empty.
3187 */ 3188 if (!list_empty(&mnt->mnt_mounts)) { 3189 this_parent = mnt; 3190 goto repeat; 3191 } 3192 3193 if (!propagate_mount_busy(mnt, 1)) { 3194 list_move_tail(&mnt->mnt_expire, graveyard); 3195 found++; 3196 } 3197 } 3198 /* 3199 * All done at this level ... ascend and resume the search 3200 */ 3201 if (this_parent != parent) { 3202 next = this_parent->mnt_child.next; 3203 this_parent = this_parent->mnt_parent; 3204 goto resume; 3205 } 3206 return found; 3207 } 3208 3209 /* 3210 * process a list of expirable mountpoints with the intent of discarding any 3211 * submounts of a specific parent mountpoint 3212 * 3213 * mount_lock must be held for write 3214 */ 3215 static void shrink_submounts(struct mount *mnt) 3216 { 3217 LIST_HEAD(graveyard); 3218 struct mount *m; 3219 3220 /* extract submounts of 'mountpoint' from the expiration list */ 3221 while (select_submounts(mnt, &graveyard)) { 3222 while (!list_empty(&graveyard)) { 3223 m = list_first_entry(&graveyard, struct mount, 3224 mnt_expire); 3225 touch_mnt_namespace(m->mnt_ns); 3226 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3227 } 3228 } 3229 } 3230 3231 static void *copy_mount_options(const void __user * data) 3232 { 3233 char *copy; 3234 unsigned left, offset; 3235 3236 if (!data) 3237 return NULL; 3238 3239 copy = kmalloc(PAGE_SIZE, GFP_KERNEL); 3240 if (!copy) 3241 return ERR_PTR(-ENOMEM); 3242 3243 left = copy_from_user(copy, data, PAGE_SIZE); 3244 3245 /* 3246 * Not all architectures have an exact copy_from_user(). Resort to 3247 * byte at a time. 3248 */ 3249 offset = PAGE_SIZE - left; 3250 while (left) { 3251 char c; 3252 if (get_user(c, (const char __user *)data + offset)) 3253 break; 3254 copy[offset] = c; 3255 left--; 3256 offset++; 3257 } 3258 3259 if (left == PAGE_SIZE) { 3260 kfree(copy); 3261 return ERR_PTR(-EFAULT); 3262 } 3263 3264 return copy; 3265 } 3266 3267 static char *copy_mount_string(const void __user *data) 3268 { 3269 return data ? strndup_user(data, PATH_MAX) : NULL; 3270 } 3271 3272 /* 3273 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 3274 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 3275 * 3276 * data is a (void *) that can point to any structure up to 3277 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 3278 * information (or be NULL). 3279 * 3280 * Pre-0.97 versions of mount() didn't have a flags word. 3281 * When the flags word was introduced its top half was required 3282 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 3283 * Therefore, if this magic number is present, it carries no information 3284 * and must be discarded. 
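 * e.g. a legacy flags word of 0xC0ED0001 is treated exactly like
 * 0x00000001, i.e. plain MS_RDONLY.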
3285 */
3286 int path_mount(const char *dev_name, struct path *path,
3287 const char *type_page, unsigned long flags, void *data_page)
3288 {
3289 unsigned int mnt_flags = 0, sb_flags;
3290 int ret;
3291
3292 /* Discard magic */
3293 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3294 flags &= ~MS_MGC_MSK;
3295
3296 /* Basic sanity checks */
3297 if (data_page)
3298 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3299
3300 if (flags & MS_NOUSER)
3301 return -EINVAL;
3302
3303 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3304 if (ret)
3305 return ret;
3306 if (!may_mount())
3307 return -EPERM;
3308 if (flags & SB_MANDLOCK)
3309 warn_mandlock();
3310
3311 /* Default to relatime unless overridden */
3312 if (!(flags & MS_NOATIME))
3313 mnt_flags |= MNT_RELATIME;
3314
3315 /* Separate the per-mountpoint flags */
3316 if (flags & MS_NOSUID)
3317 mnt_flags |= MNT_NOSUID;
3318 if (flags & MS_NODEV)
3319 mnt_flags |= MNT_NODEV;
3320 if (flags & MS_NOEXEC)
3321 mnt_flags |= MNT_NOEXEC;
3322 if (flags & MS_NOATIME)
3323 mnt_flags |= MNT_NOATIME;
3324 if (flags & MS_NODIRATIME)
3325 mnt_flags |= MNT_NODIRATIME;
3326 if (flags & MS_STRICTATIME)
3327 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3328 if (flags & MS_RDONLY)
3329 mnt_flags |= MNT_READONLY;
3330 if (flags & MS_NOSYMFOLLOW)
3331 mnt_flags |= MNT_NOSYMFOLLOW;
3332
3333 /* The default atime for remount is preservation */
3334 if ((flags & MS_REMOUNT) &&
3335 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3336 MS_STRICTATIME)) == 0)) {
3337 mnt_flags &= ~MNT_ATIME_MASK;
3338 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3339 }
3340
3341 sb_flags = flags & (SB_RDONLY |
3342 SB_SYNCHRONOUS |
3343 SB_MANDLOCK |
3344 SB_DIRSYNC |
3345 SB_SILENT |
3346 SB_POSIXACL |
3347 SB_LAZYTIME |
3348 SB_I_VERSION);
3349
3350 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3351 return do_reconfigure_mnt(path, mnt_flags);
3352 if (flags & MS_REMOUNT)
3353 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3354 if (flags & MS_BIND)
3355 return do_loopback(path, dev_name, flags & MS_REC);
3356 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3357 return do_change_type(path, flags);
3358 if (flags & MS_MOVE)
3359 return do_move_mount_old(path, dev_name);
3360
3361 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3362 data_page);
3363 }
3364
3365 long do_mount(const char *dev_name, const char __user *dir_name,
3366 const char *type_page, unsigned long flags, void *data_page)
3367 {
3368 struct path path;
3369 int ret;
3370
3371 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3372 if (ret)
3373 return ret;
3374 ret = path_mount(dev_name, &path, type_page, flags, data_page);
3375 path_put(&path);
3376 return ret;
3377 }
3378
3379 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3380 {
3381 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3382 }
3383
3384 static void dec_mnt_namespaces(struct ucounts *ucounts)
3385 {
3386 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3387 }
3388
3389 static void free_mnt_ns(struct mnt_namespace *ns)
3390 {
3391 if (!is_anon_ns(ns))
3392 ns_free_inum(&ns->ns);
3393 dec_mnt_namespaces(ns->ucounts);
3394 put_user_ns(ns->user_ns);
3395 kfree(ns);
3396 }
3397
3398 /*
3399 * Assign a sequence number so we can detect when we attempt to bind
3400 * mount a reference to an older mount namespace into the current
3401 * mount namespace, preventing reference counting loops.
A 64bit
3402 * number incrementing at one million times per second would take over
3403 * 584,000 years to wrap, which is effectively never, so we can ignore the possibility.
3404 */
3405 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3406
3407 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3408 {
3409 struct mnt_namespace *new_ns;
3410 struct ucounts *ucounts;
3411 int ret;
3412
3413 ucounts = inc_mnt_namespaces(user_ns);
3414 if (!ucounts)
3415 return ERR_PTR(-ENOSPC);
3416
3417 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3418 if (!new_ns) {
3419 dec_mnt_namespaces(ucounts);
3420 return ERR_PTR(-ENOMEM);
3421 }
3422 if (!anon) {
3423 ret = ns_alloc_inum(&new_ns->ns);
3424 if (ret) {
3425 kfree(new_ns);
3426 dec_mnt_namespaces(ucounts);
3427 return ERR_PTR(ret);
3428 }
3429 }
3430 new_ns->ns.ops = &mntns_operations;
3431 if (!anon)
3432 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3433 refcount_set(&new_ns->ns.count, 1);
3434 INIT_LIST_HEAD(&new_ns->list);
3435 init_waitqueue_head(&new_ns->poll);
3436 spin_lock_init(&new_ns->ns_lock);
3437 new_ns->user_ns = get_user_ns(user_ns);
3438 new_ns->ucounts = ucounts;
3439 return new_ns;
3440 }
3441
3442 __latent_entropy
3443 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3444 struct user_namespace *user_ns, struct fs_struct *new_fs)
3445 {
3446 struct mnt_namespace *new_ns;
3447 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3448 struct mount *p, *q;
3449 struct mount *old;
3450 struct mount *new;
3451 int copy_flags;
3452
3453 BUG_ON(!ns);
3454
3455 if (likely(!(flags & CLONE_NEWNS))) {
3456 get_mnt_ns(ns);
3457 return ns;
3458 }
3459
3460 old = ns->root;
3461
3462 new_ns = alloc_mnt_ns(user_ns, false);
3463 if (IS_ERR(new_ns))
3464 return new_ns;
3465
3466 namespace_lock();
3467 /* First pass: copy the tree topology */
3468 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3469 if (user_ns != ns->user_ns)
3470 copy_flags |= CL_SHARED_TO_SLAVE;
3471 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3472 if (IS_ERR(new)) {
3473 namespace_unlock();
3474 free_mnt_ns(new_ns);
3475 return ERR_CAST(new);
3476 }
3477 if (user_ns != ns->user_ns) {
3478 lock_mount_hash();
3479 lock_mnt_tree(new);
3480 unlock_mount_hash();
3481 }
3482 new_ns->root = new;
3483 list_add_tail(&new_ns->list, &new->mnt_list);
3484
3485 /*
3486 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3487 * as belonging to new namespace. We have already acquired a private
3488 * fs_struct, so tsk->fs->lock is not needed.
3489 */
3490 p = old;
3491 q = new;
3492 while (p) {
3493 q->mnt_ns = new_ns;
3494 new_ns->mounts++;
3495 if (new_fs) {
3496 if (&p->mnt == new_fs->root.mnt) {
3497 new_fs->root.mnt = mntget(&q->mnt);
3498 rootmnt = &p->mnt;
3499 }
3500 if (&p->mnt == new_fs->pwd.mnt) {
3501 new_fs->pwd.mnt = mntget(&q->mnt);
3502 pwdmnt = &p->mnt;
3503 }
3504 }
3505 p = next_mnt(p, old);
3506 q = next_mnt(q, new);
3507 if (!q)
3508 break;
3509 // an mntns binding we'd skipped?
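// copy_tree() ran without CL_COPY_MNT_NS_FILE, so the old tree may contain
// nsfs bindings that have no counterpart in the copy; skip past those
// subtrees so that p and q stay in lockstep.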
3510 while (p->mnt.mnt_root != q->mnt.mnt_root) 3511 p = next_mnt(skip_mnt_tree(p), old); 3512 } 3513 namespace_unlock(); 3514 3515 if (rootmnt) 3516 mntput(rootmnt); 3517 if (pwdmnt) 3518 mntput(pwdmnt); 3519 3520 return new_ns; 3521 } 3522 3523 struct dentry *mount_subtree(struct vfsmount *m, const char *name) 3524 { 3525 struct mount *mnt = real_mount(m); 3526 struct mnt_namespace *ns; 3527 struct super_block *s; 3528 struct path path; 3529 int err; 3530 3531 ns = alloc_mnt_ns(&init_user_ns, true); 3532 if (IS_ERR(ns)) { 3533 mntput(m); 3534 return ERR_CAST(ns); 3535 } 3536 mnt->mnt_ns = ns; 3537 ns->root = mnt; 3538 ns->mounts++; 3539 list_add(&mnt->mnt_list, &ns->list); 3540 3541 err = vfs_path_lookup(m->mnt_root, m, 3542 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 3543 3544 put_mnt_ns(ns); 3545 3546 if (err) 3547 return ERR_PTR(err); 3548 3549 /* trade a vfsmount reference for active sb one */ 3550 s = path.mnt->mnt_sb; 3551 atomic_inc(&s->s_active); 3552 mntput(path.mnt); 3553 /* lock the sucker */ 3554 down_write(&s->s_umount); 3555 /* ... and return the root of (sub)tree on it */ 3556 return path.dentry; 3557 } 3558 EXPORT_SYMBOL(mount_subtree); 3559 3560 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 3561 char __user *, type, unsigned long, flags, void __user *, data) 3562 { 3563 int ret; 3564 char *kernel_type; 3565 char *kernel_dev; 3566 void *options; 3567 3568 kernel_type = copy_mount_string(type); 3569 ret = PTR_ERR(kernel_type); 3570 if (IS_ERR(kernel_type)) 3571 goto out_type; 3572 3573 kernel_dev = copy_mount_string(dev_name); 3574 ret = PTR_ERR(kernel_dev); 3575 if (IS_ERR(kernel_dev)) 3576 goto out_dev; 3577 3578 options = copy_mount_options(data); 3579 ret = PTR_ERR(options); 3580 if (IS_ERR(options)) 3581 goto out_data; 3582 3583 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); 3584 3585 kfree(options); 3586 out_data: 3587 kfree(kernel_dev); 3588 out_dev: 3589 kfree(kernel_type); 3590 out_type: 3591 return ret; 3592 } 3593 3594 #define FSMOUNT_VALID_FLAGS \ 3595 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ 3596 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ 3597 MOUNT_ATTR_NOSYMFOLLOW) 3598 3599 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) 3600 3601 #define MOUNT_SETATTR_PROPAGATION_FLAGS \ 3602 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) 3603 3604 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) 3605 { 3606 unsigned int mnt_flags = 0; 3607 3608 if (attr_flags & MOUNT_ATTR_RDONLY) 3609 mnt_flags |= MNT_READONLY; 3610 if (attr_flags & MOUNT_ATTR_NOSUID) 3611 mnt_flags |= MNT_NOSUID; 3612 if (attr_flags & MOUNT_ATTR_NODEV) 3613 mnt_flags |= MNT_NODEV; 3614 if (attr_flags & MOUNT_ATTR_NOEXEC) 3615 mnt_flags |= MNT_NOEXEC; 3616 if (attr_flags & MOUNT_ATTR_NODIRATIME) 3617 mnt_flags |= MNT_NODIRATIME; 3618 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) 3619 mnt_flags |= MNT_NOSYMFOLLOW; 3620 3621 return mnt_flags; 3622 } 3623 3624 /* 3625 * Create a kernel mount representation for a new, prepared superblock 3626 * (specified by fs_fd) and attach to an open_tree-like file descriptor. 
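 *
 * Illustrative userspace sequence (error handling elided):
 *
 *	fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_RDONLY);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);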
3627 */ 3628 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 3629 unsigned int, attr_flags) 3630 { 3631 struct mnt_namespace *ns; 3632 struct fs_context *fc; 3633 struct file *file; 3634 struct path newmount; 3635 struct mount *mnt; 3636 struct fd f; 3637 unsigned int mnt_flags = 0; 3638 long ret; 3639 3640 if (!may_mount()) 3641 return -EPERM; 3642 3643 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0) 3644 return -EINVAL; 3645 3646 if (attr_flags & ~FSMOUNT_VALID_FLAGS) 3647 return -EINVAL; 3648 3649 mnt_flags = attr_flags_to_mnt_flags(attr_flags); 3650 3651 switch (attr_flags & MOUNT_ATTR__ATIME) { 3652 case MOUNT_ATTR_STRICTATIME: 3653 break; 3654 case MOUNT_ATTR_NOATIME: 3655 mnt_flags |= MNT_NOATIME; 3656 break; 3657 case MOUNT_ATTR_RELATIME: 3658 mnt_flags |= MNT_RELATIME; 3659 break; 3660 default: 3661 return -EINVAL; 3662 } 3663 3664 f = fdget(fs_fd); 3665 if (!f.file) 3666 return -EBADF; 3667 3668 ret = -EINVAL; 3669 if (f.file->f_op != &fscontext_fops) 3670 goto err_fsfd; 3671 3672 fc = f.file->private_data; 3673 3674 ret = mutex_lock_interruptible(&fc->uapi_mutex); 3675 if (ret < 0) 3676 goto err_fsfd; 3677 3678 /* There must be a valid superblock or we can't mount it */ 3679 ret = -EINVAL; 3680 if (!fc->root) 3681 goto err_unlock; 3682 3683 ret = -EPERM; 3684 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 3685 pr_warn("VFS: Mount too revealing\n"); 3686 goto err_unlock; 3687 } 3688 3689 ret = -EBUSY; 3690 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 3691 goto err_unlock; 3692 3693 if (fc->sb_flags & SB_MANDLOCK) 3694 warn_mandlock(); 3695 3696 newmount.mnt = vfs_create_mount(fc); 3697 if (IS_ERR(newmount.mnt)) { 3698 ret = PTR_ERR(newmount.mnt); 3699 goto err_unlock; 3700 } 3701 newmount.dentry = dget(fc->root); 3702 newmount.mnt->mnt_flags = mnt_flags; 3703 3704 /* We've done the mount bit - now move the file context into more or 3705 * less the same state as if we'd done an fspick(). We don't want to 3706 * do any memory allocation or anything like that at this point as we 3707 * don't want to have to handle any errors incurred. 3708 */ 3709 vfs_clean_context(fc); 3710 3711 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 3712 if (IS_ERR(ns)) { 3713 ret = PTR_ERR(ns); 3714 goto err_path; 3715 } 3716 mnt = real_mount(newmount.mnt); 3717 mnt->mnt_ns = ns; 3718 ns->root = mnt; 3719 ns->mounts = 1; 3720 list_add(&mnt->mnt_list, &ns->list); 3721 mntget(newmount.mnt); 3722 3723 /* Attach to an apparent O_PATH fd with a note that we need to unmount 3724 * it, not just simply put it. 3725 */ 3726 file = dentry_open(&newmount, O_PATH, fc->cred); 3727 if (IS_ERR(file)) { 3728 dissolve_on_fput(newmount.mnt); 3729 ret = PTR_ERR(file); 3730 goto err_path; 3731 } 3732 file->f_mode |= FMODE_NEED_UNMOUNT; 3733 3734 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0); 3735 if (ret >= 0) 3736 fd_install(ret, file); 3737 else 3738 fput(file); 3739 3740 err_path: 3741 path_put(&newmount); 3742 err_unlock: 3743 mutex_unlock(&fc->uapi_mutex); 3744 err_fsfd: 3745 fdput(f); 3746 return ret; 3747 } 3748 3749 /* 3750 * Move a mount from one place to another. In combination with 3751 * fsopen()/fsmount() this is used to install a new mount and in combination 3752 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy 3753 * a mount subtree. 3754 * 3755 * Note the flags value is a combination of MOVE_MOUNT_* flags. 
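 *
 * e.g. attaching a detached copy of an existing subtree:
 *
 *	fd = open_tree(AT_FDCWD, "/src", OPEN_TREE_CLONE | AT_RECURSIVE);
 *	move_mount(fd, "", AT_FDCWD, "/dst", MOVE_MOUNT_F_EMPTY_PATH);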
3756 */
3757 SYSCALL_DEFINE5(move_mount,
3758 int, from_dfd, const char __user *, from_pathname,
3759 int, to_dfd, const char __user *, to_pathname,
3760 unsigned int, flags)
3761 {
3762 struct path from_path, to_path;
3763 unsigned int lflags;
3764 int ret = 0;
3765
3766 if (!may_mount())
3767 return -EPERM;
3768
3769 if (flags & ~MOVE_MOUNT__MASK)
3770 return -EINVAL;
3771
3772 /* If someone gives a pathname, they aren't permitted to move
3773 * from an fd that requires unmount as we can't get at the flag
3774 * to clear it afterwards.
3775 */
3776 lflags = 0;
3777 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3778 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3779 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3780
3781 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
3782 if (ret < 0)
3783 return ret;
3784
3785 lflags = 0;
3786 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3787 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3788 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3789
3790 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
3791 if (ret < 0)
3792 goto out_from;
3793
3794 ret = security_move_mount(&from_path, &to_path);
3795 if (ret < 0)
3796 goto out_to;
3797
3798 if (flags & MOVE_MOUNT_SET_GROUP)
3799 ret = do_set_group(&from_path, &to_path);
3800 else
3801 ret = do_move_mount(&from_path, &to_path);
3802
3803 out_to:
3804 path_put(&to_path);
3805 out_from:
3806 path_put(&from_path);
3807 return ret;
3808 }
3809
3810 /*
3811 * Return true if path is reachable from root
3812 *
3813 * namespace_sem or mount_lock is held
3814 */
3815 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3816 const struct path *root)
3817 {
3818 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3819 dentry = mnt->mnt_mountpoint;
3820 mnt = mnt->mnt_parent;
3821 }
3822 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3823 }
3824
3825 bool path_is_under(const struct path *path1, const struct path *path2)
3826 {
3827 bool res;
3828 read_seqlock_excl(&mount_lock);
3829 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3830 read_sequnlock_excl(&mount_lock);
3831 return res;
3832 }
3833 EXPORT_SYMBOL(path_is_under);
3834
3835 /*
3836 * pivot_root Semantics:
3837 * Moves the root file system of the current process to the directory put_old,
3838 * makes new_root the new root file system of the current process, and sets
3839 * root/cwd of all processes which had them on the current root to new_root.
3840 *
3841 * Restrictions:
3842 * The new_root and put_old must be directories, and must not be on the
3843 * same file system as the current process root. The put_old must be
3844 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3845 * pointed to by put_old must yield the same directory as new_root. No other
3846 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3847 *
3848 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3849 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
3850 * in this situation.
3851 *
3852 * Notes:
3853 * - we don't move root/cwd if they are not at the root (reason: if something
3854 * cared enough to change them, it's probably wrong to force them elsewhere)
3855 * - it's okay to pick a root that isn't the root of a file system, e.g.
3856 * /nfs/my_root where /nfs is the mount point.
It must be a mountpoint, 3857 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 3858 * first. 3859 */ 3860 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 3861 const char __user *, put_old) 3862 { 3863 struct path new, old, root; 3864 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; 3865 struct mountpoint *old_mp, *root_mp; 3866 int error; 3867 3868 if (!may_mount()) 3869 return -EPERM; 3870 3871 error = user_path_at(AT_FDCWD, new_root, 3872 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new); 3873 if (error) 3874 goto out0; 3875 3876 error = user_path_at(AT_FDCWD, put_old, 3877 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old); 3878 if (error) 3879 goto out1; 3880 3881 error = security_sb_pivotroot(&old, &new); 3882 if (error) 3883 goto out2; 3884 3885 get_fs_root(current->fs, &root); 3886 old_mp = lock_mount(&old); 3887 error = PTR_ERR(old_mp); 3888 if (IS_ERR(old_mp)) 3889 goto out3; 3890 3891 error = -EINVAL; 3892 new_mnt = real_mount(new.mnt); 3893 root_mnt = real_mount(root.mnt); 3894 old_mnt = real_mount(old.mnt); 3895 ex_parent = new_mnt->mnt_parent; 3896 root_parent = root_mnt->mnt_parent; 3897 if (IS_MNT_SHARED(old_mnt) || 3898 IS_MNT_SHARED(ex_parent) || 3899 IS_MNT_SHARED(root_parent)) 3900 goto out4; 3901 if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) 3902 goto out4; 3903 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) 3904 goto out4; 3905 error = -ENOENT; 3906 if (d_unlinked(new.dentry)) 3907 goto out4; 3908 error = -EBUSY; 3909 if (new_mnt == root_mnt || old_mnt == root_mnt) 3910 goto out4; /* loop, on the same file system */ 3911 error = -EINVAL; 3912 if (root.mnt->mnt_root != root.dentry) 3913 goto out4; /* not a mountpoint */ 3914 if (!mnt_has_parent(root_mnt)) 3915 goto out4; /* not attached */ 3916 if (new.mnt->mnt_root != new.dentry) 3917 goto out4; /* not a mountpoint */ 3918 if (!mnt_has_parent(new_mnt)) 3919 goto out4; /* not attached */ 3920 /* make sure we can reach put_old from new_root */ 3921 if (!is_path_reachable(old_mnt, old.dentry, &new)) 3922 goto out4; 3923 /* make certain new is below the root */ 3924 if (!is_path_reachable(new_mnt, new.dentry, &root)) 3925 goto out4; 3926 lock_mount_hash(); 3927 umount_mnt(new_mnt); 3928 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */ 3929 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { 3930 new_mnt->mnt.mnt_flags |= MNT_LOCKED; 3931 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; 3932 } 3933 /* mount old root on put_old */ 3934 attach_mnt(root_mnt, old_mnt, old_mp); 3935 /* mount new_root on / */ 3936 attach_mnt(new_mnt, root_parent, root_mp); 3937 mnt_add_count(root_parent, -1); 3938 touch_mnt_namespace(current->nsproxy->mnt_ns); 3939 /* A moved mount should not expire automatically */ 3940 list_del_init(&new_mnt->mnt_expire); 3941 put_mountpoint(root_mp); 3942 unlock_mount_hash(); 3943 chroot_fs_refs(&root, &new); 3944 error = 0; 3945 out4: 3946 unlock_mount(old_mp); 3947 if (!error) 3948 mntput_no_expire(ex_parent); 3949 out3: 3950 path_put(&root); 3951 out2: 3952 path_put(&old); 3953 out1: 3954 path_put(&new); 3955 out0: 3956 return error; 3957 } 3958 3959 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt) 3960 { 3961 unsigned int flags = mnt->mnt.mnt_flags; 3962 3963 /* flags to clear */ 3964 flags &= ~kattr->attr_clr; 3965 /* flags to raise */ 3966 flags |= kattr->attr_set; 3967 3968 return flags; 3969 } 3970 3971 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 3972 { 3973 struct vfsmount *m = &mnt->mnt; 3974 struct 
user_namespace *fs_userns = m->mnt_sb->s_user_ns;
3975
3976 if (!kattr->mnt_idmap)
3977 return 0;
3978
3979 /*
3980 * Creating an idmapped mount with the filesystem wide idmapping
3981 * doesn't make sense so block that. We don't allow mushy semantics.
3982 */
3983 if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb))
3984 return -EINVAL;
3985
3986 /*
3987 * Once a mount has been idmapped we don't allow it to change its
3988 * mapping. It makes things simpler and callers can just create
3989 * another bind-mount they can idmap if they want to.
3990 */
3991 if (is_idmapped_mnt(m))
3992 return -EPERM;
3993
3994 /* The underlying filesystem doesn't support idmapped mounts yet. */
3995 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
3996 return -EINVAL;
3997
3998 /* We're not controlling the superblock. */
3999 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4000 return -EPERM;
4001
4002 /* Mount has already been visible in the filesystem hierarchy. */
4003 if (!is_anon_ns(mnt->mnt_ns))
4004 return -EINVAL;
4005
4006 return 0;
4007 }
4008
4009 /**
4010 * mnt_allow_writers() - check whether the attribute change allows writers
4011 * @kattr: the new mount attributes
4012 * @mnt: the mount to which @kattr will be applied
4013 *
4014 * Check whether the new mount attributes in @kattr allow concurrent writers.
4015 *
4016 * Return: true if writers are allowed to continue, false if they need to be held off
4017 */
4018 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4019 const struct mount *mnt)
4020 {
4021 return (!(kattr->attr_set & MNT_READONLY) ||
4022 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4023 !kattr->mnt_idmap;
4024 }
4025
4026 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4027 {
4028 struct mount *m;
4029 int err;
4030
4031 for (m = mnt; m; m = next_mnt(m, mnt)) {
4032 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4033 err = -EPERM;
4034 break;
4035 }
4036
4037 err = can_idmap_mount(kattr, m);
4038 if (err)
4039 break;
4040
4041 if (!mnt_allow_writers(kattr, m)) {
4042 err = mnt_hold_writers(m);
4043 if (err)
4044 break;
4045 }
4046
4047 if (!kattr->recurse)
4048 return 0;
4049 }
4050
4051 if (err) {
4052 struct mount *p;
4053
4054 /*
4055 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4056 * be set on those mounts' flags. The loop unsets MNT_WRITE_HOLD
4057 * for all mounts and needs to take care to include the first mount.
4058 */
4059 for (p = mnt; p; p = next_mnt(p, mnt)) {
4060 /* If we had to hold writers unblock them. */
4061 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4062 mnt_unhold_writers(p);
4063
4064 /*
4065 * We're done once the first mount we changed got
4066 * MNT_WRITE_HOLD unset.
4067 */
4068 if (p == m)
4069 break;
4070 }
4071 }
4072 return err;
4073 }
4074
4075 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4076 {
4077 if (!kattr->mnt_idmap)
4078 return;
4079
4080 /*
4081 * Pairs with smp_load_acquire() in mnt_idmap().
4082 *
4083 * Since we only allow a mount to change the idmapping once and
4084 * verified this in can_idmap_mount() we know that the mount has
4085 * @nop_mnt_idmap attached to it. So there's no need to drop any
4086 * references.
4087 */ 4088 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap)); 4089 } 4090 4091 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt) 4092 { 4093 struct mount *m; 4094 4095 for (m = mnt; m; m = next_mnt(m, mnt)) { 4096 unsigned int flags; 4097 4098 do_idmap_mount(kattr, m); 4099 flags = recalc_flags(kattr, m); 4100 WRITE_ONCE(m->mnt.mnt_flags, flags); 4101 4102 /* If we had to hold writers unblock them. */ 4103 if (m->mnt.mnt_flags & MNT_WRITE_HOLD) 4104 mnt_unhold_writers(m); 4105 4106 if (kattr->propagation) 4107 change_mnt_propagation(m, kattr->propagation); 4108 if (!kattr->recurse) 4109 break; 4110 } 4111 touch_mnt_namespace(mnt->mnt_ns); 4112 } 4113 4114 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr) 4115 { 4116 struct mount *mnt = real_mount(path->mnt); 4117 int err = 0; 4118 4119 if (path->dentry != mnt->mnt.mnt_root) 4120 return -EINVAL; 4121 4122 if (kattr->mnt_userns) { 4123 struct mnt_idmap *mnt_idmap; 4124 4125 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns); 4126 if (IS_ERR(mnt_idmap)) 4127 return PTR_ERR(mnt_idmap); 4128 kattr->mnt_idmap = mnt_idmap; 4129 } 4130 4131 if (kattr->propagation) { 4132 /* 4133 * Only take namespace_lock() if we're actually changing 4134 * propagation. 4135 */ 4136 namespace_lock(); 4137 if (kattr->propagation == MS_SHARED) { 4138 err = invent_group_ids(mnt, kattr->recurse); 4139 if (err) { 4140 namespace_unlock(); 4141 return err; 4142 } 4143 } 4144 } 4145 4146 err = -EINVAL; 4147 lock_mount_hash(); 4148 4149 /* Ensure that this isn't anything purely vfs internal. */ 4150 if (!is_mounted(&mnt->mnt)) 4151 goto out; 4152 4153 /* 4154 * If this is an attached mount make sure it's located in the callers 4155 * mount namespace. If it's not don't let the caller interact with it. 4156 * If this is a detached mount make sure it has an anonymous mount 4157 * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE. 4158 */ 4159 if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns))) 4160 goto out; 4161 4162 /* 4163 * First, we get the mount tree in a shape where we can change mount 4164 * properties without failure. If we succeeded to do so we commit all 4165 * changes and if we failed we clean up. 4166 */ 4167 err = mount_setattr_prepare(kattr, mnt); 4168 if (!err) 4169 mount_setattr_commit(kattr, mnt); 4170 4171 out: 4172 unlock_mount_hash(); 4173 4174 if (kattr->propagation) { 4175 namespace_unlock(); 4176 if (err) 4177 cleanup_group_ids(mnt, NULL); 4178 } 4179 4180 return err; 4181 } 4182 4183 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize, 4184 struct mount_kattr *kattr, unsigned int flags) 4185 { 4186 int err = 0; 4187 struct ns_common *ns; 4188 struct user_namespace *mnt_userns; 4189 struct file *file; 4190 4191 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP)) 4192 return 0; 4193 4194 /* 4195 * We currently do not support clearing an idmapped mount. If this ever 4196 * is a use-case we can revisit this but for now let's keep it simple 4197 * and not allow it. 
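 *
 * Illustrative userspace use (userns_fd is an open user namespace fd,
 * mfd a detached mount from open_tree(OPEN_TREE_CLONE)):
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_IDMAP,
 *		.userns_fd = userns_fd,
 *	};
 *	mount_setattr(mfd, "", AT_EMPTY_PATH, &attr, sizeof(attr));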

static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
			     struct mount_kattr *kattr, unsigned int flags)
{
	unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;

	if (flags & AT_NO_AUTOMOUNT)
		lookup_flags &= ~LOOKUP_AUTOMOUNT;
	if (flags & AT_SYMLINK_NOFOLLOW)
		lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	*kattr = (struct mount_kattr) {
		.lookup_flags	= lookup_flags,
		.recurse	= !!(flags & AT_RECURSIVE),
	};

	if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
		return -EINVAL;
	if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
		return -EINVAL;
	kattr->propagation = attr->propagation;

	if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
		return -EINVAL;

	kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
	kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);

	/*
	 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
	 * users wanting to transition to a different atime setting cannot
	 * simply specify the atime setting in @attr_set, but must also
	 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
	 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
	 * @attr_clr and that @attr_set can't have any atime bits set if
	 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
	 */
	if (attr->attr_clr & MOUNT_ATTR__ATIME) {
		if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
			return -EINVAL;

		/*
		 * Clear all previous time settings as they are mutually
		 * exclusive.
		 */
		kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
		switch (attr->attr_set & MOUNT_ATTR__ATIME) {
		case MOUNT_ATTR_RELATIME:
			kattr->attr_set |= MNT_RELATIME;
			break;
		case MOUNT_ATTR_NOATIME:
			kattr->attr_set |= MNT_NOATIME;
			break;
		case MOUNT_ATTR_STRICTATIME:
			break;
		default:
			return -EINVAL;
		}
	} else {
		if (attr->attr_set & MOUNT_ATTR__ATIME)
			return -EINVAL;
	}

	return build_mount_idmapped(attr, usize, kattr, flags);
}
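
/*
 * Example of the atime protocol enforced above, from the caller's side
 * (a sketch in terms of the uapi flags): switching a mount to relatime
 * requires clearing the whole atime enum while setting the new value:
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RELATIME,
 *		.attr_clr = MOUNT_ATTR__ATIME,
 *	};
 *
 * Setting MOUNT_ATTR_RELATIME without MOUNT_ATTR__ATIME in @attr_clr,
 * or with only part of MOUNT_ATTR__ATIME set there, fails with EINVAL.
 */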

static void finish_mount_kattr(struct mount_kattr *kattr)
{
	put_user_ns(kattr->mnt_userns);
	kattr->mnt_userns = NULL;

	if (kattr->mnt_idmap)
		mnt_idmap_put(kattr->mnt_idmap);
}

SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
		unsigned int, flags, struct mount_attr __user *, uattr,
		size_t, usize)
{
	int err;
	struct path target;
	struct mount_attr attr;
	struct mount_kattr kattr;

	BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);

	if (flags & ~(AT_EMPTY_PATH |
		      AT_RECURSIVE |
		      AT_SYMLINK_NOFOLLOW |
		      AT_NO_AUTOMOUNT))
		return -EINVAL;

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
	if (err)
		return err;

	/* Don't bother walking through the mounts if this is a nop. */
	if (attr.attr_set == 0 &&
	    attr.attr_clr == 0 &&
	    attr.propagation == 0)
		return 0;

	err = build_mount_kattr(&attr, usize, &kattr, flags);
	if (err)
		return err;

	err = user_path_at(dfd, path, kattr.lookup_flags, &target);
	if (!err) {
		err = do_mount_setattr(&target, &kattr);
		path_put(&target);
	}
	finish_mount_kattr(&kattr);
	return err;
}
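
/*
 * End-to-end userspace sketch of the syscall above (illustrative only;
 * assumes a libc that exposes mount_setattr(), e.g. glibc >= 2.36,
 * otherwise substitute syscall(SYS_mount_setattr, ...)): recursively
 * remount a subtree read-only and nosuid:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	struct mount_attr attr = {
 *		.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *	};
 *
 *	if (mount_setattr(-1, "/mnt/tree", AT_RECURSIVE,
 *			  &attr, sizeof(attr)) < 0)
 *		perror("mount_setattr");
 *
 * A zeroed @attr (no set/clr/propagation bits) returns 0 without
 * walking the mount tree, per the nop check above.
 */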

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mount *m;
	struct mnt_namespace *ns;
	struct path root;

	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = alloc_mnt_ns(&init_user_ns, false);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");
	m = real_mount(mnt);
	m->mnt_ns = ns;
	ns->root = m;
	ns->mounts = 1;
	list_add(&m->mnt_list, &ns->list);
	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	shmem_init();
	init_rootfs();
	init_mount_tree();
}

void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!refcount_dec_and_test(&ns->ns.count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

struct vfsmount *kern_mount(struct file_system_type *type)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	if (!IS_ERR(mnt)) {
		/*
		 * This is a long-term mount; don't release mnt until
		 * we unmount, before the filesystem is unregistered.
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (mnt[i])
			real_mount(mnt[i])->mnt_ns = NULL;
	synchronize_rcu_expedited();
	for (i = 0; i < num; i++)
		mntput(mnt[i]);
}
EXPORT_SYMBOL(kern_unmount_array);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root? */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

static bool mnt_already_visible(struct mnt_namespace *ns,
				const struct super_block *sb,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt_is_cursor(mnt))
			continue;

		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY |
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	unlock_ns_list(ns);
	up_read(&namespace_sem);
	return visible;
}
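
/*
 * Illustration of the check above (a sketch of the classic case): a
 * task in a freshly created user+mount namespace that tries to mount a
 * fresh proc instance, e.g.
 *
 *	mount("proc", "/proc", "proc", 0, NULL);
 *
 * only succeeds if a fully visible proc mount already exists in its
 * mount namespace with flags at least as permissive (read-only and
 * atime locks included) as the proposed mount; any locked attributes
 * are then propagated into *new_mnt_flags.
 */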

static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, sb, new_mnt_flags);
}

bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid. This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
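
/*
 * The mount namespace ns_common operations below back files like
 * /proc/<pid>/ns/mnt. A typical userspace consumer, as a sketch with
 * error handling omitted:
 *
 *	int fd = open("/proc/1/ns/mnt", O_RDONLY | O_CLOEXEC);
 *
 *	if (setns(fd, CLONE_NEWNS) < 0)
 *		exit(1);
 *
 * setns() ends up in mntns_install(), which requires CAP_SYS_ADMIN over
 * the target namespace plus CAP_SYS_CHROOT and CAP_SYS_ADMIN in the
 * caller's user namespace, and then rebases the caller's root and cwd
 * onto the new namespace's root.
 */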

static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct fs_struct *fs = nsset->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct user_namespace *user_ns = nsset->cred->user_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
	    !ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (is_anon_ns(mnt_ns))
		return -EINVAL;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table fs_namespace_sysctls[] = {
	{
		.procname	= "mount-max",
		.data		= &sysctl_mount_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
	{ }
};

static int __init init_fs_namespace_sysctls(void)
{
	register_sysctl_init("fs", fs_namespace_sysctls);
	return 0;
}
fs_initcall(init_fs_namespace_sysctls);

#endif /* CONFIG_SYSCTL */
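
/*
 * The table above surfaces the per-namespace mount limit as
 * /proc/sys/fs/mount-max (default 100000, clamped to a minimum of 1 via
 * SYSCTL_ONE). A sketch of raising it from userspace:
 *
 *	int fd = open("/proc/sys/fs/mount-max", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, "200000", 6);
 */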