1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/fs/namespace.c 4 * 5 * (C) Copyright Al Viro 2000, 2001 6 * 7 * Based on code from fs/super.c, copyright Linus Torvalds and others. 8 * Heavily rewritten. 9 */ 10 11 #include <linux/syscalls.h> 12 #include <linux/export.h> 13 #include <linux/capability.h> 14 #include <linux/mnt_namespace.h> 15 #include <linux/user_namespace.h> 16 #include <linux/namei.h> 17 #include <linux/security.h> 18 #include <linux/cred.h> 19 #include <linux/idr.h> 20 #include <linux/init.h> /* init_rootfs */ 21 #include <linux/fs_struct.h> /* get_fs_root et.al. */ 22 #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ 23 #include <linux/file.h> 24 #include <linux/uaccess.h> 25 #include <linux/proc_ns.h> 26 #include <linux/magic.h> 27 #include <linux/memblock.h> 28 #include <linux/proc_fs.h> 29 #include <linux/task_work.h> 30 #include <linux/sched/task.h> 31 #include <uapi/linux/mount.h> 32 #include <linux/fs_context.h> 33 #include <linux/shmem_fs.h> 34 #include <linux/mnt_idmapping.h> 35 36 #include "pnode.h" 37 #include "internal.h" 38 39 /* Maximum number of mounts in a mount namespace */ 40 static unsigned int sysctl_mount_max __read_mostly = 100000; 41 42 static unsigned int m_hash_mask __read_mostly; 43 static unsigned int m_hash_shift __read_mostly; 44 static unsigned int mp_hash_mask __read_mostly; 45 static unsigned int mp_hash_shift __read_mostly; 46 47 static __initdata unsigned long mhash_entries; 48 static int __init set_mhash_entries(char *str) 49 { 50 if (!str) 51 return 0; 52 mhash_entries = simple_strtoul(str, &str, 0); 53 return 1; 54 } 55 __setup("mhash_entries=", set_mhash_entries); 56 57 static __initdata unsigned long mphash_entries; 58 static int __init set_mphash_entries(char *str) 59 { 60 if (!str) 61 return 0; 62 mphash_entries = simple_strtoul(str, &str, 0); 63 return 1; 64 } 65 __setup("mphash_entries=", set_mphash_entries); 66 67 static u64 event; 68 static DEFINE_IDA(mnt_id_ida); 69 static DEFINE_IDA(mnt_group_ida); 70 71 static struct hlist_head *mount_hashtable __read_mostly; 72 static struct hlist_head *mountpoint_hashtable __read_mostly; 73 static struct kmem_cache *mnt_cache __read_mostly; 74 static DECLARE_RWSEM(namespace_sem); 75 static HLIST_HEAD(unmounted); /* protected by namespace_sem */ 76 static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */ 77 78 struct mount_kattr { 79 unsigned int attr_set; 80 unsigned int attr_clr; 81 unsigned int propagation; 82 unsigned int lookup_flags; 83 bool recurse; 84 struct user_namespace *mnt_userns; 85 struct mnt_idmap *mnt_idmap; 86 }; 87 88 /* /sys/fs */ 89 struct kobject *fs_kobj; 90 EXPORT_SYMBOL_GPL(fs_kobj); 91 92 /* 93 * vfsmount lock may be taken for read to prevent changes to the 94 * vfsmount hash, ie. during mountpoint lookups or walking back 95 * up the tree. 96 * 97 * It should be taken for write in all cases where the vfsmount 98 * tree or hash is modified or when a vfsmount structure is modified. 99 */ 100 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); 101 102 static inline void lock_mount_hash(void) 103 { 104 write_seqlock(&mount_lock); 105 } 106 107 static inline void unlock_mount_hash(void) 108 { 109 write_sequnlock(&mount_lock); 110 } 111 112 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) 113 { 114 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); 115 tmp += ((unsigned long)dentry / L1_CACHE_BYTES); 116 tmp = tmp + (tmp >> m_hash_shift); 117 return &mount_hashtable[tmp & m_hash_mask]; 118 } 119 120 static inline struct hlist_head *mp_hash(struct dentry *dentry) 121 { 122 unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); 123 tmp = tmp + (tmp >> mp_hash_shift); 124 return &mountpoint_hashtable[tmp & mp_hash_mask]; 125 } 126 127 static int mnt_alloc_id(struct mount *mnt) 128 { 129 int res = ida_alloc(&mnt_id_ida, GFP_KERNEL); 130 131 if (res < 0) 132 return res; 133 mnt->mnt_id = res; 134 return 0; 135 } 136 137 static void mnt_free_id(struct mount *mnt) 138 { 139 ida_free(&mnt_id_ida, mnt->mnt_id); 140 } 141 142 /* 143 * Allocate a new peer group ID 144 */ 145 static int mnt_alloc_group_id(struct mount *mnt) 146 { 147 int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL); 148 149 if (res < 0) 150 return res; 151 mnt->mnt_group_id = res; 152 return 0; 153 } 154 155 /* 156 * Release a peer group ID 157 */ 158 void mnt_release_group_id(struct mount *mnt) 159 { 160 ida_free(&mnt_group_ida, mnt->mnt_group_id); 161 mnt->mnt_group_id = 0; 162 } 163 164 /* 165 * vfsmount lock must be held for read 166 */ 167 static inline void mnt_add_count(struct mount *mnt, int n) 168 { 169 #ifdef CONFIG_SMP 170 this_cpu_add(mnt->mnt_pcp->mnt_count, n); 171 #else 172 preempt_disable(); 173 mnt->mnt_count += n; 174 preempt_enable(); 175 #endif 176 } 177 178 /* 179 * vfsmount lock must be held for write 180 */ 181 int mnt_get_count(struct mount *mnt) 182 { 183 #ifdef CONFIG_SMP 184 int count = 0; 185 int cpu; 186 187 for_each_possible_cpu(cpu) { 188 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; 189 } 190 191 return count; 192 #else 193 return mnt->mnt_count; 194 #endif 195 } 196 197 static struct mount *alloc_vfsmnt(const char *name) 198 { 199 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); 200 if (mnt) { 201 int err; 202 203 err = mnt_alloc_id(mnt); 204 if (err) 205 goto out_free_cache; 206 207 if (name) { 208 mnt->mnt_devname = kstrdup_const(name, 209 GFP_KERNEL_ACCOUNT); 210 if (!mnt->mnt_devname) 211 goto out_free_id; 212 } 213 214 #ifdef CONFIG_SMP 215 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); 216 if (!mnt->mnt_pcp) 217 goto out_free_devname; 218 219 this_cpu_add(mnt->mnt_pcp->mnt_count, 1); 220 #else 221 mnt->mnt_count = 1; 222 mnt->mnt_writers = 0; 223 #endif 224 225 INIT_HLIST_NODE(&mnt->mnt_hash); 226 INIT_LIST_HEAD(&mnt->mnt_child); 227 INIT_LIST_HEAD(&mnt->mnt_mounts); 228 INIT_LIST_HEAD(&mnt->mnt_list); 229 INIT_LIST_HEAD(&mnt->mnt_expire); 230 INIT_LIST_HEAD(&mnt->mnt_share); 231 INIT_LIST_HEAD(&mnt->mnt_slave_list); 232 INIT_LIST_HEAD(&mnt->mnt_slave); 233 INIT_HLIST_NODE(&mnt->mnt_mp_list); 234 INIT_LIST_HEAD(&mnt->mnt_umounting); 235 INIT_HLIST_HEAD(&mnt->mnt_stuck_children); 236 mnt->mnt.mnt_idmap = &nop_mnt_idmap; 237 } 238 return mnt; 239 240 #ifdef CONFIG_SMP 241 out_free_devname: 242 kfree_const(mnt->mnt_devname); 243 #endif 244 out_free_id: 245 mnt_free_id(mnt); 246 out_free_cache: 247 kmem_cache_free(mnt_cache, mnt); 248 return NULL; 249 } 250 251 /* 252 * Most r/o checks on a fs are for operations that take 253 * discrete amounts of time, like a write() or unlink(). 254 * We must keep track of when those operations start 255 * (for permission checks) and when they end, so that 256 * we can determine when writes are able to occur to 257 * a filesystem. 258 */ 259 /* 260 * __mnt_is_readonly: check whether a mount is read-only 261 * @mnt: the mount to check for its write status 262 * 263 * This shouldn't be used directly ouside of the VFS. 264 * It does not guarantee that the filesystem will stay 265 * r/w, just that it is right *now*. This can not and 266 * should not be used in place of IS_RDONLY(inode). 267 * mnt_want/drop_write() will _keep_ the filesystem 268 * r/w. 269 */ 270 bool __mnt_is_readonly(struct vfsmount *mnt) 271 { 272 return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb); 273 } 274 EXPORT_SYMBOL_GPL(__mnt_is_readonly); 275 276 static inline void mnt_inc_writers(struct mount *mnt) 277 { 278 #ifdef CONFIG_SMP 279 this_cpu_inc(mnt->mnt_pcp->mnt_writers); 280 #else 281 mnt->mnt_writers++; 282 #endif 283 } 284 285 static inline void mnt_dec_writers(struct mount *mnt) 286 { 287 #ifdef CONFIG_SMP 288 this_cpu_dec(mnt->mnt_pcp->mnt_writers); 289 #else 290 mnt->mnt_writers--; 291 #endif 292 } 293 294 static unsigned int mnt_get_writers(struct mount *mnt) 295 { 296 #ifdef CONFIG_SMP 297 unsigned int count = 0; 298 int cpu; 299 300 for_each_possible_cpu(cpu) { 301 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; 302 } 303 304 return count; 305 #else 306 return mnt->mnt_writers; 307 #endif 308 } 309 310 static int mnt_is_readonly(struct vfsmount *mnt) 311 { 312 if (mnt->mnt_sb->s_readonly_remount) 313 return 1; 314 /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ 315 smp_rmb(); 316 return __mnt_is_readonly(mnt); 317 } 318 319 /* 320 * Most r/o & frozen checks on a fs are for operations that take discrete 321 * amounts of time, like a write() or unlink(). We must keep track of when 322 * those operations start (for permission checks) and when they end, so that we 323 * can determine when writes are able to occur to a filesystem. 324 */ 325 /** 326 * __mnt_want_write - get write access to a mount without freeze protection 327 * @m: the mount on which to take a write 328 * 329 * This tells the low-level filesystem that a write is about to be performed to 330 * it, and makes sure that writes are allowed (mnt it read-write) before 331 * returning success. This operation does not protect against filesystem being 332 * frozen. When the write operation is finished, __mnt_drop_write() must be 333 * called. This is effectively a refcount. 334 */ 335 int __mnt_want_write(struct vfsmount *m) 336 { 337 struct mount *mnt = real_mount(m); 338 int ret = 0; 339 340 preempt_disable(); 341 mnt_inc_writers(mnt); 342 /* 343 * The store to mnt_inc_writers must be visible before we pass 344 * MNT_WRITE_HOLD loop below, so that the slowpath can see our 345 * incremented count after it has set MNT_WRITE_HOLD. 346 */ 347 smp_mb(); 348 might_lock(&mount_lock.lock); 349 while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { 350 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { 351 cpu_relax(); 352 } else { 353 /* 354 * This prevents priority inversion, if the task 355 * setting MNT_WRITE_HOLD got preempted on a remote 356 * CPU, and it prevents life lock if the task setting 357 * MNT_WRITE_HOLD has a lower priority and is bound to 358 * the same CPU as the task that is spinning here. 359 */ 360 preempt_enable(); 361 lock_mount_hash(); 362 unlock_mount_hash(); 363 preempt_disable(); 364 } 365 } 366 /* 367 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will 368 * be set to match its requirements. So we must not load that until 369 * MNT_WRITE_HOLD is cleared. 370 */ 371 smp_rmb(); 372 if (mnt_is_readonly(m)) { 373 mnt_dec_writers(mnt); 374 ret = -EROFS; 375 } 376 preempt_enable(); 377 378 return ret; 379 } 380 381 /** 382 * mnt_want_write - get write access to a mount 383 * @m: the mount on which to take a write 384 * 385 * This tells the low-level filesystem that a write is about to be performed to 386 * it, and makes sure that writes are allowed (mount is read-write, filesystem 387 * is not frozen) before returning success. When the write operation is 388 * finished, mnt_drop_write() must be called. This is effectively a refcount. 389 */ 390 int mnt_want_write(struct vfsmount *m) 391 { 392 int ret; 393 394 sb_start_write(m->mnt_sb); 395 ret = __mnt_want_write(m); 396 if (ret) 397 sb_end_write(m->mnt_sb); 398 return ret; 399 } 400 EXPORT_SYMBOL_GPL(mnt_want_write); 401 402 /** 403 * __mnt_want_write_file - get write access to a file's mount 404 * @file: the file who's mount on which to take a write 405 * 406 * This is like __mnt_want_write, but if the file is already open for writing it 407 * skips incrementing mnt_writers (since the open file already has a reference) 408 * and instead only does the check for emergency r/o remounts. This must be 409 * paired with __mnt_drop_write_file. 410 */ 411 int __mnt_want_write_file(struct file *file) 412 { 413 if (file->f_mode & FMODE_WRITER) { 414 /* 415 * Superblock may have become readonly while there are still 416 * writable fd's, e.g. due to a fs error with errors=remount-ro 417 */ 418 if (__mnt_is_readonly(file->f_path.mnt)) 419 return -EROFS; 420 return 0; 421 } 422 return __mnt_want_write(file->f_path.mnt); 423 } 424 425 /** 426 * mnt_want_write_file - get write access to a file's mount 427 * @file: the file who's mount on which to take a write 428 * 429 * This is like mnt_want_write, but if the file is already open for writing it 430 * skips incrementing mnt_writers (since the open file already has a reference) 431 * and instead only does the freeze protection and the check for emergency r/o 432 * remounts. This must be paired with mnt_drop_write_file. 433 */ 434 int mnt_want_write_file(struct file *file) 435 { 436 int ret; 437 438 sb_start_write(file_inode(file)->i_sb); 439 ret = __mnt_want_write_file(file); 440 if (ret) 441 sb_end_write(file_inode(file)->i_sb); 442 return ret; 443 } 444 EXPORT_SYMBOL_GPL(mnt_want_write_file); 445 446 /** 447 * __mnt_drop_write - give up write access to a mount 448 * @mnt: the mount on which to give up write access 449 * 450 * Tells the low-level filesystem that we are done 451 * performing writes to it. Must be matched with 452 * __mnt_want_write() call above. 453 */ 454 void __mnt_drop_write(struct vfsmount *mnt) 455 { 456 preempt_disable(); 457 mnt_dec_writers(real_mount(mnt)); 458 preempt_enable(); 459 } 460 461 /** 462 * mnt_drop_write - give up write access to a mount 463 * @mnt: the mount on which to give up write access 464 * 465 * Tells the low-level filesystem that we are done performing writes to it and 466 * also allows filesystem to be frozen again. Must be matched with 467 * mnt_want_write() call above. 468 */ 469 void mnt_drop_write(struct vfsmount *mnt) 470 { 471 __mnt_drop_write(mnt); 472 sb_end_write(mnt->mnt_sb); 473 } 474 EXPORT_SYMBOL_GPL(mnt_drop_write); 475 476 void __mnt_drop_write_file(struct file *file) 477 { 478 if (!(file->f_mode & FMODE_WRITER)) 479 __mnt_drop_write(file->f_path.mnt); 480 } 481 482 void mnt_drop_write_file(struct file *file) 483 { 484 __mnt_drop_write_file(file); 485 sb_end_write(file_inode(file)->i_sb); 486 } 487 EXPORT_SYMBOL(mnt_drop_write_file); 488 489 /** 490 * mnt_hold_writers - prevent write access to the given mount 491 * @mnt: mnt to prevent write access to 492 * 493 * Prevents write access to @mnt if there are no active writers for @mnt. 494 * This function needs to be called and return successfully before changing 495 * properties of @mnt that need to remain stable for callers with write access 496 * to @mnt. 497 * 498 * After this functions has been called successfully callers must pair it with 499 * a call to mnt_unhold_writers() in order to stop preventing write access to 500 * @mnt. 501 * 502 * Context: This function expects lock_mount_hash() to be held serializing 503 * setting MNT_WRITE_HOLD. 504 * Return: On success 0 is returned. 505 * On error, -EBUSY is returned. 506 */ 507 static inline int mnt_hold_writers(struct mount *mnt) 508 { 509 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; 510 /* 511 * After storing MNT_WRITE_HOLD, we'll read the counters. This store 512 * should be visible before we do. 513 */ 514 smp_mb(); 515 516 /* 517 * With writers on hold, if this value is zero, then there are 518 * definitely no active writers (although held writers may subsequently 519 * increment the count, they'll have to wait, and decrement it after 520 * seeing MNT_READONLY). 521 * 522 * It is OK to have counter incremented on one CPU and decremented on 523 * another: the sum will add up correctly. The danger would be when we 524 * sum up each counter, if we read a counter before it is incremented, 525 * but then read another CPU's count which it has been subsequently 526 * decremented from -- we would see more decrements than we should. 527 * MNT_WRITE_HOLD protects against this scenario, because 528 * mnt_want_write first increments count, then smp_mb, then spins on 529 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while 530 * we're counting up here. 531 */ 532 if (mnt_get_writers(mnt) > 0) 533 return -EBUSY; 534 535 return 0; 536 } 537 538 /** 539 * mnt_unhold_writers - stop preventing write access to the given mount 540 * @mnt: mnt to stop preventing write access to 541 * 542 * Stop preventing write access to @mnt allowing callers to gain write access 543 * to @mnt again. 544 * 545 * This function can only be called after a successful call to 546 * mnt_hold_writers(). 547 * 548 * Context: This function expects lock_mount_hash() to be held. 549 */ 550 static inline void mnt_unhold_writers(struct mount *mnt) 551 { 552 /* 553 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers 554 * that become unheld will see MNT_READONLY. 555 */ 556 smp_wmb(); 557 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 558 } 559 560 static int mnt_make_readonly(struct mount *mnt) 561 { 562 int ret; 563 564 ret = mnt_hold_writers(mnt); 565 if (!ret) 566 mnt->mnt.mnt_flags |= MNT_READONLY; 567 mnt_unhold_writers(mnt); 568 return ret; 569 } 570 571 int sb_prepare_remount_readonly(struct super_block *sb) 572 { 573 struct mount *mnt; 574 int err = 0; 575 576 /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ 577 if (atomic_long_read(&sb->s_remove_count)) 578 return -EBUSY; 579 580 lock_mount_hash(); 581 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { 582 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { 583 err = mnt_hold_writers(mnt); 584 if (err) 585 break; 586 } 587 } 588 if (!err && atomic_long_read(&sb->s_remove_count)) 589 err = -EBUSY; 590 591 if (!err) { 592 sb->s_readonly_remount = 1; 593 smp_wmb(); 594 } 595 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { 596 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) 597 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 598 } 599 unlock_mount_hash(); 600 601 return err; 602 } 603 604 static void free_vfsmnt(struct mount *mnt) 605 { 606 mnt_idmap_put(mnt_idmap(&mnt->mnt)); 607 kfree_const(mnt->mnt_devname); 608 #ifdef CONFIG_SMP 609 free_percpu(mnt->mnt_pcp); 610 #endif 611 kmem_cache_free(mnt_cache, mnt); 612 } 613 614 static void delayed_free_vfsmnt(struct rcu_head *head) 615 { 616 free_vfsmnt(container_of(head, struct mount, mnt_rcu)); 617 } 618 619 /* call under rcu_read_lock */ 620 int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) 621 { 622 struct mount *mnt; 623 if (read_seqretry(&mount_lock, seq)) 624 return 1; 625 if (bastard == NULL) 626 return 0; 627 mnt = real_mount(bastard); 628 mnt_add_count(mnt, 1); 629 smp_mb(); // see mntput_no_expire() 630 if (likely(!read_seqretry(&mount_lock, seq))) 631 return 0; 632 if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { 633 mnt_add_count(mnt, -1); 634 return 1; 635 } 636 lock_mount_hash(); 637 if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { 638 mnt_add_count(mnt, -1); 639 unlock_mount_hash(); 640 return 1; 641 } 642 unlock_mount_hash(); 643 /* caller will mntput() */ 644 return -1; 645 } 646 647 /* call under rcu_read_lock */ 648 static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) 649 { 650 int res = __legitimize_mnt(bastard, seq); 651 if (likely(!res)) 652 return true; 653 if (unlikely(res < 0)) { 654 rcu_read_unlock(); 655 mntput(bastard); 656 rcu_read_lock(); 657 } 658 return false; 659 } 660 661 /* 662 * find the first mount at @dentry on vfsmount @mnt. 663 * call under rcu_read_lock() 664 */ 665 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) 666 { 667 struct hlist_head *head = m_hash(mnt, dentry); 668 struct mount *p; 669 670 hlist_for_each_entry_rcu(p, head, mnt_hash) 671 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) 672 return p; 673 return NULL; 674 } 675 676 /* 677 * lookup_mnt - Return the first child mount mounted at path 678 * 679 * "First" means first mounted chronologically. If you create the 680 * following mounts: 681 * 682 * mount /dev/sda1 /mnt 683 * mount /dev/sda2 /mnt 684 * mount /dev/sda3 /mnt 685 * 686 * Then lookup_mnt() on the base /mnt dentry in the root mount will 687 * return successively the root dentry and vfsmount of /dev/sda1, then 688 * /dev/sda2, then /dev/sda3, then NULL. 689 * 690 * lookup_mnt takes a reference to the found vfsmount. 691 */ 692 struct vfsmount *lookup_mnt(const struct path *path) 693 { 694 struct mount *child_mnt; 695 struct vfsmount *m; 696 unsigned seq; 697 698 rcu_read_lock(); 699 do { 700 seq = read_seqbegin(&mount_lock); 701 child_mnt = __lookup_mnt(path->mnt, path->dentry); 702 m = child_mnt ? &child_mnt->mnt : NULL; 703 } while (!legitimize_mnt(m, seq)); 704 rcu_read_unlock(); 705 return m; 706 } 707 708 static inline void lock_ns_list(struct mnt_namespace *ns) 709 { 710 spin_lock(&ns->ns_lock); 711 } 712 713 static inline void unlock_ns_list(struct mnt_namespace *ns) 714 { 715 spin_unlock(&ns->ns_lock); 716 } 717 718 static inline bool mnt_is_cursor(struct mount *mnt) 719 { 720 return mnt->mnt.mnt_flags & MNT_CURSOR; 721 } 722 723 /* 724 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the 725 * current mount namespace. 726 * 727 * The common case is dentries are not mountpoints at all and that 728 * test is handled inline. For the slow case when we are actually 729 * dealing with a mountpoint of some kind, walk through all of the 730 * mounts in the current mount namespace and test to see if the dentry 731 * is a mountpoint. 732 * 733 * The mount_hashtable is not usable in the context because we 734 * need to identify all mounts that may be in the current mount 735 * namespace not just a mount that happens to have some specified 736 * parent mount. 737 */ 738 bool __is_local_mountpoint(struct dentry *dentry) 739 { 740 struct mnt_namespace *ns = current->nsproxy->mnt_ns; 741 struct mount *mnt; 742 bool is_covered = false; 743 744 down_read(&namespace_sem); 745 lock_ns_list(ns); 746 list_for_each_entry(mnt, &ns->list, mnt_list) { 747 if (mnt_is_cursor(mnt)) 748 continue; 749 is_covered = (mnt->mnt_mountpoint == dentry); 750 if (is_covered) 751 break; 752 } 753 unlock_ns_list(ns); 754 up_read(&namespace_sem); 755 756 return is_covered; 757 } 758 759 static struct mountpoint *lookup_mountpoint(struct dentry *dentry) 760 { 761 struct hlist_head *chain = mp_hash(dentry); 762 struct mountpoint *mp; 763 764 hlist_for_each_entry(mp, chain, m_hash) { 765 if (mp->m_dentry == dentry) { 766 mp->m_count++; 767 return mp; 768 } 769 } 770 return NULL; 771 } 772 773 static struct mountpoint *get_mountpoint(struct dentry *dentry) 774 { 775 struct mountpoint *mp, *new = NULL; 776 int ret; 777 778 if (d_mountpoint(dentry)) { 779 /* might be worth a WARN_ON() */ 780 if (d_unlinked(dentry)) 781 return ERR_PTR(-ENOENT); 782 mountpoint: 783 read_seqlock_excl(&mount_lock); 784 mp = lookup_mountpoint(dentry); 785 read_sequnlock_excl(&mount_lock); 786 if (mp) 787 goto done; 788 } 789 790 if (!new) 791 new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); 792 if (!new) 793 return ERR_PTR(-ENOMEM); 794 795 796 /* Exactly one processes may set d_mounted */ 797 ret = d_set_mounted(dentry); 798 799 /* Someone else set d_mounted? */ 800 if (ret == -EBUSY) 801 goto mountpoint; 802 803 /* The dentry is not available as a mountpoint? */ 804 mp = ERR_PTR(ret); 805 if (ret) 806 goto done; 807 808 /* Add the new mountpoint to the hash table */ 809 read_seqlock_excl(&mount_lock); 810 new->m_dentry = dget(dentry); 811 new->m_count = 1; 812 hlist_add_head(&new->m_hash, mp_hash(dentry)); 813 INIT_HLIST_HEAD(&new->m_list); 814 read_sequnlock_excl(&mount_lock); 815 816 mp = new; 817 new = NULL; 818 done: 819 kfree(new); 820 return mp; 821 } 822 823 /* 824 * vfsmount lock must be held. Additionally, the caller is responsible 825 * for serializing calls for given disposal list. 826 */ 827 static void __put_mountpoint(struct mountpoint *mp, struct list_head *list) 828 { 829 if (!--mp->m_count) { 830 struct dentry *dentry = mp->m_dentry; 831 BUG_ON(!hlist_empty(&mp->m_list)); 832 spin_lock(&dentry->d_lock); 833 dentry->d_flags &= ~DCACHE_MOUNTED; 834 spin_unlock(&dentry->d_lock); 835 dput_to_list(dentry, list); 836 hlist_del(&mp->m_hash); 837 kfree(mp); 838 } 839 } 840 841 /* called with namespace_lock and vfsmount lock */ 842 static void put_mountpoint(struct mountpoint *mp) 843 { 844 __put_mountpoint(mp, &ex_mountpoints); 845 } 846 847 static inline int check_mnt(struct mount *mnt) 848 { 849 return mnt->mnt_ns == current->nsproxy->mnt_ns; 850 } 851 852 /* 853 * vfsmount lock must be held for write 854 */ 855 static void touch_mnt_namespace(struct mnt_namespace *ns) 856 { 857 if (ns) { 858 ns->event = ++event; 859 wake_up_interruptible(&ns->poll); 860 } 861 } 862 863 /* 864 * vfsmount lock must be held for write 865 */ 866 static void __touch_mnt_namespace(struct mnt_namespace *ns) 867 { 868 if (ns && ns->event != event) { 869 ns->event = event; 870 wake_up_interruptible(&ns->poll); 871 } 872 } 873 874 /* 875 * vfsmount lock must be held for write 876 */ 877 static struct mountpoint *unhash_mnt(struct mount *mnt) 878 { 879 struct mountpoint *mp; 880 mnt->mnt_parent = mnt; 881 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 882 list_del_init(&mnt->mnt_child); 883 hlist_del_init_rcu(&mnt->mnt_hash); 884 hlist_del_init(&mnt->mnt_mp_list); 885 mp = mnt->mnt_mp; 886 mnt->mnt_mp = NULL; 887 return mp; 888 } 889 890 /* 891 * vfsmount lock must be held for write 892 */ 893 static void umount_mnt(struct mount *mnt) 894 { 895 put_mountpoint(unhash_mnt(mnt)); 896 } 897 898 /* 899 * vfsmount lock must be held for write 900 */ 901 void mnt_set_mountpoint(struct mount *mnt, 902 struct mountpoint *mp, 903 struct mount *child_mnt) 904 { 905 mp->m_count++; 906 mnt_add_count(mnt, 1); /* essentially, that's mntget */ 907 child_mnt->mnt_mountpoint = mp->m_dentry; 908 child_mnt->mnt_parent = mnt; 909 child_mnt->mnt_mp = mp; 910 hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list); 911 } 912 913 static void __attach_mnt(struct mount *mnt, struct mount *parent) 914 { 915 hlist_add_head_rcu(&mnt->mnt_hash, 916 m_hash(&parent->mnt, mnt->mnt_mountpoint)); 917 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); 918 } 919 920 /* 921 * vfsmount lock must be held for write 922 */ 923 static void attach_mnt(struct mount *mnt, 924 struct mount *parent, 925 struct mountpoint *mp) 926 { 927 mnt_set_mountpoint(parent, mp, mnt); 928 __attach_mnt(mnt, parent); 929 } 930 931 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) 932 { 933 struct mountpoint *old_mp = mnt->mnt_mp; 934 struct mount *old_parent = mnt->mnt_parent; 935 936 list_del_init(&mnt->mnt_child); 937 hlist_del_init(&mnt->mnt_mp_list); 938 hlist_del_init_rcu(&mnt->mnt_hash); 939 940 attach_mnt(mnt, parent, mp); 941 942 put_mountpoint(old_mp); 943 mnt_add_count(old_parent, -1); 944 } 945 946 /* 947 * vfsmount lock must be held for write 948 */ 949 static void commit_tree(struct mount *mnt) 950 { 951 struct mount *parent = mnt->mnt_parent; 952 struct mount *m; 953 LIST_HEAD(head); 954 struct mnt_namespace *n = parent->mnt_ns; 955 956 BUG_ON(parent == mnt); 957 958 list_add_tail(&head, &mnt->mnt_list); 959 list_for_each_entry(m, &head, mnt_list) 960 m->mnt_ns = n; 961 962 list_splice(&head, n->list.prev); 963 964 n->mounts += n->pending_mounts; 965 n->pending_mounts = 0; 966 967 __attach_mnt(mnt, parent); 968 touch_mnt_namespace(n); 969 } 970 971 static struct mount *next_mnt(struct mount *p, struct mount *root) 972 { 973 struct list_head *next = p->mnt_mounts.next; 974 if (next == &p->mnt_mounts) { 975 while (1) { 976 if (p == root) 977 return NULL; 978 next = p->mnt_child.next; 979 if (next != &p->mnt_parent->mnt_mounts) 980 break; 981 p = p->mnt_parent; 982 } 983 } 984 return list_entry(next, struct mount, mnt_child); 985 } 986 987 static struct mount *skip_mnt_tree(struct mount *p) 988 { 989 struct list_head *prev = p->mnt_mounts.prev; 990 while (prev != &p->mnt_mounts) { 991 p = list_entry(prev, struct mount, mnt_child); 992 prev = p->mnt_mounts.prev; 993 } 994 return p; 995 } 996 997 /** 998 * vfs_create_mount - Create a mount for a configured superblock 999 * @fc: The configuration context with the superblock attached 1000 * 1001 * Create a mount to an already configured superblock. If necessary, the 1002 * caller should invoke vfs_get_tree() before calling this. 1003 * 1004 * Note that this does not attach the mount to anything. 1005 */ 1006 struct vfsmount *vfs_create_mount(struct fs_context *fc) 1007 { 1008 struct mount *mnt; 1009 1010 if (!fc->root) 1011 return ERR_PTR(-EINVAL); 1012 1013 mnt = alloc_vfsmnt(fc->source ?: "none"); 1014 if (!mnt) 1015 return ERR_PTR(-ENOMEM); 1016 1017 if (fc->sb_flags & SB_KERNMOUNT) 1018 mnt->mnt.mnt_flags = MNT_INTERNAL; 1019 1020 atomic_inc(&fc->root->d_sb->s_active); 1021 mnt->mnt.mnt_sb = fc->root->d_sb; 1022 mnt->mnt.mnt_root = dget(fc->root); 1023 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 1024 mnt->mnt_parent = mnt; 1025 1026 lock_mount_hash(); 1027 list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts); 1028 unlock_mount_hash(); 1029 return &mnt->mnt; 1030 } 1031 EXPORT_SYMBOL(vfs_create_mount); 1032 1033 struct vfsmount *fc_mount(struct fs_context *fc) 1034 { 1035 int err = vfs_get_tree(fc); 1036 if (!err) { 1037 up_write(&fc->root->d_sb->s_umount); 1038 return vfs_create_mount(fc); 1039 } 1040 return ERR_PTR(err); 1041 } 1042 EXPORT_SYMBOL(fc_mount); 1043 1044 struct vfsmount *vfs_kern_mount(struct file_system_type *type, 1045 int flags, const char *name, 1046 void *data) 1047 { 1048 struct fs_context *fc; 1049 struct vfsmount *mnt; 1050 int ret = 0; 1051 1052 if (!type) 1053 return ERR_PTR(-EINVAL); 1054 1055 fc = fs_context_for_mount(type, flags); 1056 if (IS_ERR(fc)) 1057 return ERR_CAST(fc); 1058 1059 if (name) 1060 ret = vfs_parse_fs_string(fc, "source", 1061 name, strlen(name)); 1062 if (!ret) 1063 ret = parse_monolithic_mount_data(fc, data); 1064 if (!ret) 1065 mnt = fc_mount(fc); 1066 else 1067 mnt = ERR_PTR(ret); 1068 1069 put_fs_context(fc); 1070 return mnt; 1071 } 1072 EXPORT_SYMBOL_GPL(vfs_kern_mount); 1073 1074 struct vfsmount * 1075 vfs_submount(const struct dentry *mountpoint, struct file_system_type *type, 1076 const char *name, void *data) 1077 { 1078 /* Until it is worked out how to pass the user namespace 1079 * through from the parent mount to the submount don't support 1080 * unprivileged mounts with submounts. 1081 */ 1082 if (mountpoint->d_sb->s_user_ns != &init_user_ns) 1083 return ERR_PTR(-EPERM); 1084 1085 return vfs_kern_mount(type, SB_SUBMOUNT, name, data); 1086 } 1087 EXPORT_SYMBOL_GPL(vfs_submount); 1088 1089 static struct mount *clone_mnt(struct mount *old, struct dentry *root, 1090 int flag) 1091 { 1092 struct super_block *sb = old->mnt.mnt_sb; 1093 struct mount *mnt; 1094 int err; 1095 1096 mnt = alloc_vfsmnt(old->mnt_devname); 1097 if (!mnt) 1098 return ERR_PTR(-ENOMEM); 1099 1100 if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) 1101 mnt->mnt_group_id = 0; /* not a peer of original */ 1102 else 1103 mnt->mnt_group_id = old->mnt_group_id; 1104 1105 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { 1106 err = mnt_alloc_group_id(mnt); 1107 if (err) 1108 goto out_free; 1109 } 1110 1111 mnt->mnt.mnt_flags = old->mnt.mnt_flags; 1112 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); 1113 1114 atomic_inc(&sb->s_active); 1115 mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt)); 1116 1117 mnt->mnt.mnt_sb = sb; 1118 mnt->mnt.mnt_root = dget(root); 1119 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 1120 mnt->mnt_parent = mnt; 1121 lock_mount_hash(); 1122 list_add_tail(&mnt->mnt_instance, &sb->s_mounts); 1123 unlock_mount_hash(); 1124 1125 if ((flag & CL_SLAVE) || 1126 ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { 1127 list_add(&mnt->mnt_slave, &old->mnt_slave_list); 1128 mnt->mnt_master = old; 1129 CLEAR_MNT_SHARED(mnt); 1130 } else if (!(flag & CL_PRIVATE)) { 1131 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) 1132 list_add(&mnt->mnt_share, &old->mnt_share); 1133 if (IS_MNT_SLAVE(old)) 1134 list_add(&mnt->mnt_slave, &old->mnt_slave); 1135 mnt->mnt_master = old->mnt_master; 1136 } else { 1137 CLEAR_MNT_SHARED(mnt); 1138 } 1139 if (flag & CL_MAKE_SHARED) 1140 set_mnt_shared(mnt); 1141 1142 /* stick the duplicate mount on the same expiry list 1143 * as the original if that was on one */ 1144 if (flag & CL_EXPIRE) { 1145 if (!list_empty(&old->mnt_expire)) 1146 list_add(&mnt->mnt_expire, &old->mnt_expire); 1147 } 1148 1149 return mnt; 1150 1151 out_free: 1152 mnt_free_id(mnt); 1153 free_vfsmnt(mnt); 1154 return ERR_PTR(err); 1155 } 1156 1157 static void cleanup_mnt(struct mount *mnt) 1158 { 1159 struct hlist_node *p; 1160 struct mount *m; 1161 /* 1162 * The warning here probably indicates that somebody messed 1163 * up a mnt_want/drop_write() pair. If this happens, the 1164 * filesystem was probably unable to make r/w->r/o transitions. 1165 * The locking used to deal with mnt_count decrement provides barriers, 1166 * so mnt_get_writers() below is safe. 1167 */ 1168 WARN_ON(mnt_get_writers(mnt)); 1169 if (unlikely(mnt->mnt_pins.first)) 1170 mnt_pin_kill(mnt); 1171 hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) { 1172 hlist_del(&m->mnt_umount); 1173 mntput(&m->mnt); 1174 } 1175 fsnotify_vfsmount_delete(&mnt->mnt); 1176 dput(mnt->mnt.mnt_root); 1177 deactivate_super(mnt->mnt.mnt_sb); 1178 mnt_free_id(mnt); 1179 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); 1180 } 1181 1182 static void __cleanup_mnt(struct rcu_head *head) 1183 { 1184 cleanup_mnt(container_of(head, struct mount, mnt_rcu)); 1185 } 1186 1187 static LLIST_HEAD(delayed_mntput_list); 1188 static void delayed_mntput(struct work_struct *unused) 1189 { 1190 struct llist_node *node = llist_del_all(&delayed_mntput_list); 1191 struct mount *m, *t; 1192 1193 llist_for_each_entry_safe(m, t, node, mnt_llist) 1194 cleanup_mnt(m); 1195 } 1196 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); 1197 1198 static void mntput_no_expire(struct mount *mnt) 1199 { 1200 LIST_HEAD(list); 1201 int count; 1202 1203 rcu_read_lock(); 1204 if (likely(READ_ONCE(mnt->mnt_ns))) { 1205 /* 1206 * Since we don't do lock_mount_hash() here, 1207 * ->mnt_ns can change under us. However, if it's 1208 * non-NULL, then there's a reference that won't 1209 * be dropped until after an RCU delay done after 1210 * turning ->mnt_ns NULL. So if we observe it 1211 * non-NULL under rcu_read_lock(), the reference 1212 * we are dropping is not the final one. 1213 */ 1214 mnt_add_count(mnt, -1); 1215 rcu_read_unlock(); 1216 return; 1217 } 1218 lock_mount_hash(); 1219 /* 1220 * make sure that if __legitimize_mnt() has not seen us grab 1221 * mount_lock, we'll see their refcount increment here. 1222 */ 1223 smp_mb(); 1224 mnt_add_count(mnt, -1); 1225 count = mnt_get_count(mnt); 1226 if (count != 0) { 1227 WARN_ON(count < 0); 1228 rcu_read_unlock(); 1229 unlock_mount_hash(); 1230 return; 1231 } 1232 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { 1233 rcu_read_unlock(); 1234 unlock_mount_hash(); 1235 return; 1236 } 1237 mnt->mnt.mnt_flags |= MNT_DOOMED; 1238 rcu_read_unlock(); 1239 1240 list_del(&mnt->mnt_instance); 1241 1242 if (unlikely(!list_empty(&mnt->mnt_mounts))) { 1243 struct mount *p, *tmp; 1244 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1245 __put_mountpoint(unhash_mnt(p), &list); 1246 hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children); 1247 } 1248 } 1249 unlock_mount_hash(); 1250 shrink_dentry_list(&list); 1251 1252 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { 1253 struct task_struct *task = current; 1254 if (likely(!(task->flags & PF_KTHREAD))) { 1255 init_task_work(&mnt->mnt_rcu, __cleanup_mnt); 1256 if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME)) 1257 return; 1258 } 1259 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list)) 1260 schedule_delayed_work(&delayed_mntput_work, 1); 1261 return; 1262 } 1263 cleanup_mnt(mnt); 1264 } 1265 1266 void mntput(struct vfsmount *mnt) 1267 { 1268 if (mnt) { 1269 struct mount *m = real_mount(mnt); 1270 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ 1271 if (unlikely(m->mnt_expiry_mark)) 1272 m->mnt_expiry_mark = 0; 1273 mntput_no_expire(m); 1274 } 1275 } 1276 EXPORT_SYMBOL(mntput); 1277 1278 struct vfsmount *mntget(struct vfsmount *mnt) 1279 { 1280 if (mnt) 1281 mnt_add_count(real_mount(mnt), 1); 1282 return mnt; 1283 } 1284 EXPORT_SYMBOL(mntget); 1285 1286 /* 1287 * Make a mount point inaccessible to new lookups. 1288 * Because there may still be current users, the caller MUST WAIT 1289 * for an RCU grace period before destroying the mount point. 1290 */ 1291 void mnt_make_shortterm(struct vfsmount *mnt) 1292 { 1293 if (mnt) 1294 real_mount(mnt)->mnt_ns = NULL; 1295 } 1296 1297 /** 1298 * path_is_mountpoint() - Check if path is a mount in the current namespace. 1299 * @path: path to check 1300 * 1301 * d_mountpoint() can only be used reliably to establish if a dentry is 1302 * not mounted in any namespace and that common case is handled inline. 1303 * d_mountpoint() isn't aware of the possibility there may be multiple 1304 * mounts using a given dentry in a different namespace. This function 1305 * checks if the passed in path is a mountpoint rather than the dentry 1306 * alone. 1307 */ 1308 bool path_is_mountpoint(const struct path *path) 1309 { 1310 unsigned seq; 1311 bool res; 1312 1313 if (!d_mountpoint(path->dentry)) 1314 return false; 1315 1316 rcu_read_lock(); 1317 do { 1318 seq = read_seqbegin(&mount_lock); 1319 res = __path_is_mountpoint(path); 1320 } while (read_seqretry(&mount_lock, seq)); 1321 rcu_read_unlock(); 1322 1323 return res; 1324 } 1325 EXPORT_SYMBOL(path_is_mountpoint); 1326 1327 struct vfsmount *mnt_clone_internal(const struct path *path) 1328 { 1329 struct mount *p; 1330 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); 1331 if (IS_ERR(p)) 1332 return ERR_CAST(p); 1333 p->mnt.mnt_flags |= MNT_INTERNAL; 1334 return &p->mnt; 1335 } 1336 1337 #ifdef CONFIG_PROC_FS 1338 static struct mount *mnt_list_next(struct mnt_namespace *ns, 1339 struct list_head *p) 1340 { 1341 struct mount *mnt, *ret = NULL; 1342 1343 lock_ns_list(ns); 1344 list_for_each_continue(p, &ns->list) { 1345 mnt = list_entry(p, typeof(*mnt), mnt_list); 1346 if (!mnt_is_cursor(mnt)) { 1347 ret = mnt; 1348 break; 1349 } 1350 } 1351 unlock_ns_list(ns); 1352 1353 return ret; 1354 } 1355 1356 /* iterator; we want it to have access to namespace_sem, thus here... */ 1357 static void *m_start(struct seq_file *m, loff_t *pos) 1358 { 1359 struct proc_mounts *p = m->private; 1360 struct list_head *prev; 1361 1362 down_read(&namespace_sem); 1363 if (!*pos) { 1364 prev = &p->ns->list; 1365 } else { 1366 prev = &p->cursor.mnt_list; 1367 1368 /* Read after we'd reached the end? */ 1369 if (list_empty(prev)) 1370 return NULL; 1371 } 1372 1373 return mnt_list_next(p->ns, prev); 1374 } 1375 1376 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 1377 { 1378 struct proc_mounts *p = m->private; 1379 struct mount *mnt = v; 1380 1381 ++*pos; 1382 return mnt_list_next(p->ns, &mnt->mnt_list); 1383 } 1384 1385 static void m_stop(struct seq_file *m, void *v) 1386 { 1387 struct proc_mounts *p = m->private; 1388 struct mount *mnt = v; 1389 1390 lock_ns_list(p->ns); 1391 if (mnt) 1392 list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list); 1393 else 1394 list_del_init(&p->cursor.mnt_list); 1395 unlock_ns_list(p->ns); 1396 up_read(&namespace_sem); 1397 } 1398 1399 static int m_show(struct seq_file *m, void *v) 1400 { 1401 struct proc_mounts *p = m->private; 1402 struct mount *r = v; 1403 return p->show(m, &r->mnt); 1404 } 1405 1406 const struct seq_operations mounts_op = { 1407 .start = m_start, 1408 .next = m_next, 1409 .stop = m_stop, 1410 .show = m_show, 1411 }; 1412 1413 void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor) 1414 { 1415 down_read(&namespace_sem); 1416 lock_ns_list(ns); 1417 list_del(&cursor->mnt_list); 1418 unlock_ns_list(ns); 1419 up_read(&namespace_sem); 1420 } 1421 #endif /* CONFIG_PROC_FS */ 1422 1423 /** 1424 * may_umount_tree - check if a mount tree is busy 1425 * @m: root of mount tree 1426 * 1427 * This is called to check if a tree of mounts has any 1428 * open files, pwds, chroots or sub mounts that are 1429 * busy. 1430 */ 1431 int may_umount_tree(struct vfsmount *m) 1432 { 1433 struct mount *mnt = real_mount(m); 1434 int actual_refs = 0; 1435 int minimum_refs = 0; 1436 struct mount *p; 1437 BUG_ON(!m); 1438 1439 /* write lock needed for mnt_get_count */ 1440 lock_mount_hash(); 1441 for (p = mnt; p; p = next_mnt(p, mnt)) { 1442 actual_refs += mnt_get_count(p); 1443 minimum_refs += 2; 1444 } 1445 unlock_mount_hash(); 1446 1447 if (actual_refs > minimum_refs) 1448 return 0; 1449 1450 return 1; 1451 } 1452 1453 EXPORT_SYMBOL(may_umount_tree); 1454 1455 /** 1456 * may_umount - check if a mount point is busy 1457 * @mnt: root of mount 1458 * 1459 * This is called to check if a mount point has any 1460 * open files, pwds, chroots or sub mounts. If the 1461 * mount has sub mounts this will return busy 1462 * regardless of whether the sub mounts are busy. 1463 * 1464 * Doesn't take quota and stuff into account. IOW, in some cases it will 1465 * give false negatives. The main reason why it's here is that we need 1466 * a non-destructive way to look for easily umountable filesystems. 1467 */ 1468 int may_umount(struct vfsmount *mnt) 1469 { 1470 int ret = 1; 1471 down_read(&namespace_sem); 1472 lock_mount_hash(); 1473 if (propagate_mount_busy(real_mount(mnt), 2)) 1474 ret = 0; 1475 unlock_mount_hash(); 1476 up_read(&namespace_sem); 1477 return ret; 1478 } 1479 1480 EXPORT_SYMBOL(may_umount); 1481 1482 static void namespace_unlock(void) 1483 { 1484 struct hlist_head head; 1485 struct hlist_node *p; 1486 struct mount *m; 1487 LIST_HEAD(list); 1488 1489 hlist_move_list(&unmounted, &head); 1490 list_splice_init(&ex_mountpoints, &list); 1491 1492 up_write(&namespace_sem); 1493 1494 shrink_dentry_list(&list); 1495 1496 if (likely(hlist_empty(&head))) 1497 return; 1498 1499 synchronize_rcu_expedited(); 1500 1501 hlist_for_each_entry_safe(m, p, &head, mnt_umount) { 1502 hlist_del(&m->mnt_umount); 1503 mntput(&m->mnt); 1504 } 1505 } 1506 1507 static inline void namespace_lock(void) 1508 { 1509 down_write(&namespace_sem); 1510 } 1511 1512 enum umount_tree_flags { 1513 UMOUNT_SYNC = 1, 1514 UMOUNT_PROPAGATE = 2, 1515 UMOUNT_CONNECTED = 4, 1516 }; 1517 1518 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) 1519 { 1520 /* Leaving mounts connected is only valid for lazy umounts */ 1521 if (how & UMOUNT_SYNC) 1522 return true; 1523 1524 /* A mount without a parent has nothing to be connected to */ 1525 if (!mnt_has_parent(mnt)) 1526 return true; 1527 1528 /* Because the reference counting rules change when mounts are 1529 * unmounted and connected, umounted mounts may not be 1530 * connected to mounted mounts. 1531 */ 1532 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) 1533 return true; 1534 1535 /* Has it been requested that the mount remain connected? */ 1536 if (how & UMOUNT_CONNECTED) 1537 return false; 1538 1539 /* Is the mount locked such that it needs to remain connected? */ 1540 if (IS_MNT_LOCKED(mnt)) 1541 return false; 1542 1543 /* By default disconnect the mount */ 1544 return true; 1545 } 1546 1547 /* 1548 * mount_lock must be held 1549 * namespace_sem must be held for write 1550 */ 1551 static void umount_tree(struct mount *mnt, enum umount_tree_flags how) 1552 { 1553 LIST_HEAD(tmp_list); 1554 struct mount *p; 1555 1556 if (how & UMOUNT_PROPAGATE) 1557 propagate_mount_unlock(mnt); 1558 1559 /* Gather the mounts to umount */ 1560 for (p = mnt; p; p = next_mnt(p, mnt)) { 1561 p->mnt.mnt_flags |= MNT_UMOUNT; 1562 list_move(&p->mnt_list, &tmp_list); 1563 } 1564 1565 /* Hide the mounts from mnt_mounts */ 1566 list_for_each_entry(p, &tmp_list, mnt_list) { 1567 list_del_init(&p->mnt_child); 1568 } 1569 1570 /* Add propogated mounts to the tmp_list */ 1571 if (how & UMOUNT_PROPAGATE) 1572 propagate_umount(&tmp_list); 1573 1574 while (!list_empty(&tmp_list)) { 1575 struct mnt_namespace *ns; 1576 bool disconnect; 1577 p = list_first_entry(&tmp_list, struct mount, mnt_list); 1578 list_del_init(&p->mnt_expire); 1579 list_del_init(&p->mnt_list); 1580 ns = p->mnt_ns; 1581 if (ns) { 1582 ns->mounts--; 1583 __touch_mnt_namespace(ns); 1584 } 1585 p->mnt_ns = NULL; 1586 if (how & UMOUNT_SYNC) 1587 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1588 1589 disconnect = disconnect_mount(p, how); 1590 if (mnt_has_parent(p)) { 1591 mnt_add_count(p->mnt_parent, -1); 1592 if (!disconnect) { 1593 /* Don't forget about p */ 1594 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); 1595 } else { 1596 umount_mnt(p); 1597 } 1598 } 1599 change_mnt_propagation(p, MS_PRIVATE); 1600 if (disconnect) 1601 hlist_add_head(&p->mnt_umount, &unmounted); 1602 } 1603 } 1604 1605 static void shrink_submounts(struct mount *mnt); 1606 1607 static int do_umount_root(struct super_block *sb) 1608 { 1609 int ret = 0; 1610 1611 down_write(&sb->s_umount); 1612 if (!sb_rdonly(sb)) { 1613 struct fs_context *fc; 1614 1615 fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY, 1616 SB_RDONLY); 1617 if (IS_ERR(fc)) { 1618 ret = PTR_ERR(fc); 1619 } else { 1620 ret = parse_monolithic_mount_data(fc, NULL); 1621 if (!ret) 1622 ret = reconfigure_super(fc); 1623 put_fs_context(fc); 1624 } 1625 } 1626 up_write(&sb->s_umount); 1627 return ret; 1628 } 1629 1630 static int do_umount(struct mount *mnt, int flags) 1631 { 1632 struct super_block *sb = mnt->mnt.mnt_sb; 1633 int retval; 1634 1635 retval = security_sb_umount(&mnt->mnt, flags); 1636 if (retval) 1637 return retval; 1638 1639 /* 1640 * Allow userspace to request a mountpoint be expired rather than 1641 * unmounting unconditionally. Unmount only happens if: 1642 * (1) the mark is already set (the mark is cleared by mntput()) 1643 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] 1644 */ 1645 if (flags & MNT_EXPIRE) { 1646 if (&mnt->mnt == current->fs->root.mnt || 1647 flags & (MNT_FORCE | MNT_DETACH)) 1648 return -EINVAL; 1649 1650 /* 1651 * probably don't strictly need the lock here if we examined 1652 * all race cases, but it's a slowpath. 1653 */ 1654 lock_mount_hash(); 1655 if (mnt_get_count(mnt) != 2) { 1656 unlock_mount_hash(); 1657 return -EBUSY; 1658 } 1659 unlock_mount_hash(); 1660 1661 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1662 return -EAGAIN; 1663 } 1664 1665 /* 1666 * If we may have to abort operations to get out of this 1667 * mount, and they will themselves hold resources we must 1668 * allow the fs to do things. In the Unix tradition of 1669 * 'Gee thats tricky lets do it in userspace' the umount_begin 1670 * might fail to complete on the first run through as other tasks 1671 * must return, and the like. Thats for the mount program to worry 1672 * about for the moment. 1673 */ 1674 1675 if (flags & MNT_FORCE && sb->s_op->umount_begin) { 1676 sb->s_op->umount_begin(sb); 1677 } 1678 1679 /* 1680 * No sense to grab the lock for this test, but test itself looks 1681 * somewhat bogus. Suggestions for better replacement? 1682 * Ho-hum... In principle, we might treat that as umount + switch 1683 * to rootfs. GC would eventually take care of the old vfsmount. 1684 * Actually it makes sense, especially if rootfs would contain a 1685 * /reboot - static binary that would close all descriptors and 1686 * call reboot(9). Then init(8) could umount root and exec /reboot. 1687 */ 1688 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { 1689 /* 1690 * Special case for "unmounting" root ... 1691 * we just try to remount it readonly. 1692 */ 1693 if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) 1694 return -EPERM; 1695 return do_umount_root(sb); 1696 } 1697 1698 namespace_lock(); 1699 lock_mount_hash(); 1700 1701 /* Recheck MNT_LOCKED with the locks held */ 1702 retval = -EINVAL; 1703 if (mnt->mnt.mnt_flags & MNT_LOCKED) 1704 goto out; 1705 1706 event++; 1707 if (flags & MNT_DETACH) { 1708 if (!list_empty(&mnt->mnt_list)) 1709 umount_tree(mnt, UMOUNT_PROPAGATE); 1710 retval = 0; 1711 } else { 1712 shrink_submounts(mnt); 1713 retval = -EBUSY; 1714 if (!propagate_mount_busy(mnt, 2)) { 1715 if (!list_empty(&mnt->mnt_list)) 1716 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); 1717 retval = 0; 1718 } 1719 } 1720 out: 1721 unlock_mount_hash(); 1722 namespace_unlock(); 1723 return retval; 1724 } 1725 1726 /* 1727 * __detach_mounts - lazily unmount all mounts on the specified dentry 1728 * 1729 * During unlink, rmdir, and d_drop it is possible to loose the path 1730 * to an existing mountpoint, and wind up leaking the mount. 1731 * detach_mounts allows lazily unmounting those mounts instead of 1732 * leaking them. 1733 * 1734 * The caller may hold dentry->d_inode->i_mutex. 1735 */ 1736 void __detach_mounts(struct dentry *dentry) 1737 { 1738 struct mountpoint *mp; 1739 struct mount *mnt; 1740 1741 namespace_lock(); 1742 lock_mount_hash(); 1743 mp = lookup_mountpoint(dentry); 1744 if (!mp) 1745 goto out_unlock; 1746 1747 event++; 1748 while (!hlist_empty(&mp->m_list)) { 1749 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1750 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1751 umount_mnt(mnt); 1752 hlist_add_head(&mnt->mnt_umount, &unmounted); 1753 } 1754 else umount_tree(mnt, UMOUNT_CONNECTED); 1755 } 1756 put_mountpoint(mp); 1757 out_unlock: 1758 unlock_mount_hash(); 1759 namespace_unlock(); 1760 } 1761 1762 /* 1763 * Is the caller allowed to modify his namespace? 1764 */ 1765 bool may_mount(void) 1766 { 1767 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); 1768 } 1769 1770 static void warn_mandlock(void) 1771 { 1772 pr_warn_once("=======================================================\n" 1773 "WARNING: The mand mount option has been deprecated and\n" 1774 " and is ignored by this kernel. Remove the mand\n" 1775 " option from the mount to silence this warning.\n" 1776 "=======================================================\n"); 1777 } 1778 1779 static int can_umount(const struct path *path, int flags) 1780 { 1781 struct mount *mnt = real_mount(path->mnt); 1782 1783 if (!may_mount()) 1784 return -EPERM; 1785 if (path->dentry != path->mnt->mnt_root) 1786 return -EINVAL; 1787 if (!check_mnt(mnt)) 1788 return -EINVAL; 1789 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ 1790 return -EINVAL; 1791 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) 1792 return -EPERM; 1793 return 0; 1794 } 1795 1796 // caller is responsible for flags being sane 1797 int path_umount(struct path *path, int flags) 1798 { 1799 struct mount *mnt = real_mount(path->mnt); 1800 int ret; 1801 1802 ret = can_umount(path, flags); 1803 if (!ret) 1804 ret = do_umount(mnt, flags); 1805 1806 /* we mustn't call path_put() as that would clear mnt_expiry_mark */ 1807 dput(path->dentry); 1808 mntput_no_expire(mnt); 1809 return ret; 1810 } 1811 1812 static int ksys_umount(char __user *name, int flags) 1813 { 1814 int lookup_flags = LOOKUP_MOUNTPOINT; 1815 struct path path; 1816 int ret; 1817 1818 // basic validity checks done first 1819 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) 1820 return -EINVAL; 1821 1822 if (!(flags & UMOUNT_NOFOLLOW)) 1823 lookup_flags |= LOOKUP_FOLLOW; 1824 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); 1825 if (ret) 1826 return ret; 1827 return path_umount(&path, flags); 1828 } 1829 1830 SYSCALL_DEFINE2(umount, char __user *, name, int, flags) 1831 { 1832 return ksys_umount(name, flags); 1833 } 1834 1835 #ifdef __ARCH_WANT_SYS_OLDUMOUNT 1836 1837 /* 1838 * The 2.0 compatible umount. No flags. 1839 */ 1840 SYSCALL_DEFINE1(oldumount, char __user *, name) 1841 { 1842 return ksys_umount(name, 0); 1843 } 1844 1845 #endif 1846 1847 static bool is_mnt_ns_file(struct dentry *dentry) 1848 { 1849 /* Is this a proxy for a mount namespace? */ 1850 return dentry->d_op == &ns_dentry_operations && 1851 dentry->d_fsdata == &mntns_operations; 1852 } 1853 1854 static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) 1855 { 1856 return container_of(ns, struct mnt_namespace, ns); 1857 } 1858 1859 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) 1860 { 1861 return &mnt->ns; 1862 } 1863 1864 static bool mnt_ns_loop(struct dentry *dentry) 1865 { 1866 /* Could bind mounting the mount namespace inode cause a 1867 * mount namespace loop? 1868 */ 1869 struct mnt_namespace *mnt_ns; 1870 if (!is_mnt_ns_file(dentry)) 1871 return false; 1872 1873 mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode)); 1874 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; 1875 } 1876 1877 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, 1878 int flag) 1879 { 1880 struct mount *res, *p, *q, *r, *parent; 1881 1882 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) 1883 return ERR_PTR(-EINVAL); 1884 1885 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) 1886 return ERR_PTR(-EINVAL); 1887 1888 res = q = clone_mnt(mnt, dentry, flag); 1889 if (IS_ERR(q)) 1890 return q; 1891 1892 q->mnt_mountpoint = mnt->mnt_mountpoint; 1893 1894 p = mnt; 1895 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { 1896 struct mount *s; 1897 if (!is_subdir(r->mnt_mountpoint, dentry)) 1898 continue; 1899 1900 for (s = r; s; s = next_mnt(s, r)) { 1901 if (!(flag & CL_COPY_UNBINDABLE) && 1902 IS_MNT_UNBINDABLE(s)) { 1903 if (s->mnt.mnt_flags & MNT_LOCKED) { 1904 /* Both unbindable and locked. */ 1905 q = ERR_PTR(-EPERM); 1906 goto out; 1907 } else { 1908 s = skip_mnt_tree(s); 1909 continue; 1910 } 1911 } 1912 if (!(flag & CL_COPY_MNT_NS_FILE) && 1913 is_mnt_ns_file(s->mnt.mnt_root)) { 1914 s = skip_mnt_tree(s); 1915 continue; 1916 } 1917 while (p != s->mnt_parent) { 1918 p = p->mnt_parent; 1919 q = q->mnt_parent; 1920 } 1921 p = s; 1922 parent = q; 1923 q = clone_mnt(p, p->mnt.mnt_root, flag); 1924 if (IS_ERR(q)) 1925 goto out; 1926 lock_mount_hash(); 1927 list_add_tail(&q->mnt_list, &res->mnt_list); 1928 attach_mnt(q, parent, p->mnt_mp); 1929 unlock_mount_hash(); 1930 } 1931 } 1932 return res; 1933 out: 1934 if (res) { 1935 lock_mount_hash(); 1936 umount_tree(res, UMOUNT_SYNC); 1937 unlock_mount_hash(); 1938 } 1939 return q; 1940 } 1941 1942 /* Caller should check returned pointer for errors */ 1943 1944 struct vfsmount *collect_mounts(const struct path *path) 1945 { 1946 struct mount *tree; 1947 namespace_lock(); 1948 if (!check_mnt(real_mount(path->mnt))) 1949 tree = ERR_PTR(-EINVAL); 1950 else 1951 tree = copy_tree(real_mount(path->mnt), path->dentry, 1952 CL_COPY_ALL | CL_PRIVATE); 1953 namespace_unlock(); 1954 if (IS_ERR(tree)) 1955 return ERR_CAST(tree); 1956 return &tree->mnt; 1957 } 1958 1959 static void free_mnt_ns(struct mnt_namespace *); 1960 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); 1961 1962 void dissolve_on_fput(struct vfsmount *mnt) 1963 { 1964 struct mnt_namespace *ns; 1965 namespace_lock(); 1966 lock_mount_hash(); 1967 ns = real_mount(mnt)->mnt_ns; 1968 if (ns) { 1969 if (is_anon_ns(ns)) 1970 umount_tree(real_mount(mnt), UMOUNT_CONNECTED); 1971 else 1972 ns = NULL; 1973 } 1974 unlock_mount_hash(); 1975 namespace_unlock(); 1976 if (ns) 1977 free_mnt_ns(ns); 1978 } 1979 1980 void drop_collected_mounts(struct vfsmount *mnt) 1981 { 1982 namespace_lock(); 1983 lock_mount_hash(); 1984 umount_tree(real_mount(mnt), 0); 1985 unlock_mount_hash(); 1986 namespace_unlock(); 1987 } 1988 1989 static bool has_locked_children(struct mount *mnt, struct dentry *dentry) 1990 { 1991 struct mount *child; 1992 1993 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 1994 if (!is_subdir(child->mnt_mountpoint, dentry)) 1995 continue; 1996 1997 if (child->mnt.mnt_flags & MNT_LOCKED) 1998 return true; 1999 } 2000 return false; 2001 } 2002 2003 /** 2004 * clone_private_mount - create a private clone of a path 2005 * @path: path to clone 2006 * 2007 * This creates a new vfsmount, which will be the clone of @path. The new mount 2008 * will not be attached anywhere in the namespace and will be private (i.e. 2009 * changes to the originating mount won't be propagated into this). 2010 * 2011 * Release with mntput(). 2012 */ 2013 struct vfsmount *clone_private_mount(const struct path *path) 2014 { 2015 struct mount *old_mnt = real_mount(path->mnt); 2016 struct mount *new_mnt; 2017 2018 down_read(&namespace_sem); 2019 if (IS_MNT_UNBINDABLE(old_mnt)) 2020 goto invalid; 2021 2022 if (!check_mnt(old_mnt)) 2023 goto invalid; 2024 2025 if (has_locked_children(old_mnt, path->dentry)) 2026 goto invalid; 2027 2028 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); 2029 up_read(&namespace_sem); 2030 2031 if (IS_ERR(new_mnt)) 2032 return ERR_CAST(new_mnt); 2033 2034 /* Longterm mount to be removed by kern_unmount*() */ 2035 new_mnt->mnt_ns = MNT_NS_INTERNAL; 2036 2037 return &new_mnt->mnt; 2038 2039 invalid: 2040 up_read(&namespace_sem); 2041 return ERR_PTR(-EINVAL); 2042 } 2043 EXPORT_SYMBOL_GPL(clone_private_mount); 2044 2045 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, 2046 struct vfsmount *root) 2047 { 2048 struct mount *mnt; 2049 int res = f(root, arg); 2050 if (res) 2051 return res; 2052 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { 2053 res = f(&mnt->mnt, arg); 2054 if (res) 2055 return res; 2056 } 2057 return 0; 2058 } 2059 2060 static void lock_mnt_tree(struct mount *mnt) 2061 { 2062 struct mount *p; 2063 2064 for (p = mnt; p; p = next_mnt(p, mnt)) { 2065 int flags = p->mnt.mnt_flags; 2066 /* Don't allow unprivileged users to change mount flags */ 2067 flags |= MNT_LOCK_ATIME; 2068 2069 if (flags & MNT_READONLY) 2070 flags |= MNT_LOCK_READONLY; 2071 2072 if (flags & MNT_NODEV) 2073 flags |= MNT_LOCK_NODEV; 2074 2075 if (flags & MNT_NOSUID) 2076 flags |= MNT_LOCK_NOSUID; 2077 2078 if (flags & MNT_NOEXEC) 2079 flags |= MNT_LOCK_NOEXEC; 2080 /* Don't allow unprivileged users to reveal what is under a mount */ 2081 if (list_empty(&p->mnt_expire)) 2082 flags |= MNT_LOCKED; 2083 p->mnt.mnt_flags = flags; 2084 } 2085 } 2086 2087 static void cleanup_group_ids(struct mount *mnt, struct mount *end) 2088 { 2089 struct mount *p; 2090 2091 for (p = mnt; p != end; p = next_mnt(p, mnt)) { 2092 if (p->mnt_group_id && !IS_MNT_SHARED(p)) 2093 mnt_release_group_id(p); 2094 } 2095 } 2096 2097 static int invent_group_ids(struct mount *mnt, bool recurse) 2098 { 2099 struct mount *p; 2100 2101 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { 2102 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { 2103 int err = mnt_alloc_group_id(p); 2104 if (err) { 2105 cleanup_group_ids(mnt, p); 2106 return err; 2107 } 2108 } 2109 } 2110 2111 return 0; 2112 } 2113 2114 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) 2115 { 2116 unsigned int max = READ_ONCE(sysctl_mount_max); 2117 unsigned int mounts = 0; 2118 struct mount *p; 2119 2120 if (ns->mounts >= max) 2121 return -ENOSPC; 2122 max -= ns->mounts; 2123 if (ns->pending_mounts >= max) 2124 return -ENOSPC; 2125 max -= ns->pending_mounts; 2126 2127 for (p = mnt; p; p = next_mnt(p, mnt)) 2128 mounts++; 2129 2130 if (mounts > max) 2131 return -ENOSPC; 2132 2133 ns->pending_mounts += mounts; 2134 return 0; 2135 } 2136 2137 /* 2138 * @source_mnt : mount tree to be attached 2139 * @nd : place the mount tree @source_mnt is attached 2140 * @parent_nd : if non-null, detach the source_mnt from its parent and 2141 * store the parent mount and mountpoint dentry. 2142 * (done when source_mnt is moved) 2143 * 2144 * NOTE: in the table below explains the semantics when a source mount 2145 * of a given type is attached to a destination mount of a given type. 2146 * --------------------------------------------------------------------------- 2147 * | BIND MOUNT OPERATION | 2148 * |************************************************************************** 2149 * | source-->| shared | private | slave | unbindable | 2150 * | dest | | | | | 2151 * | | | | | | | 2152 * | v | | | | | 2153 * |************************************************************************** 2154 * | shared | shared (++) | shared (+) | shared(+++)| invalid | 2155 * | | | | | | 2156 * |non-shared| shared (+) | private | slave (*) | invalid | 2157 * *************************************************************************** 2158 * A bind operation clones the source mount and mounts the clone on the 2159 * destination mount. 2160 * 2161 * (++) the cloned mount is propagated to all the mounts in the propagation 2162 * tree of the destination mount and the cloned mount is added to 2163 * the peer group of the source mount. 2164 * (+) the cloned mount is created under the destination mount and is marked 2165 * as shared. The cloned mount is added to the peer group of the source 2166 * mount. 2167 * (+++) the mount is propagated to all the mounts in the propagation tree 2168 * of the destination mount and the cloned mount is made slave 2169 * of the same master as that of the source mount. The cloned mount 2170 * is marked as 'shared and slave'. 2171 * (*) the cloned mount is made a slave of the same master as that of the 2172 * source mount. 2173 * 2174 * --------------------------------------------------------------------------- 2175 * | MOVE MOUNT OPERATION | 2176 * |************************************************************************** 2177 * | source-->| shared | private | slave | unbindable | 2178 * | dest | | | | | 2179 * | | | | | | | 2180 * | v | | | | | 2181 * |************************************************************************** 2182 * | shared | shared (+) | shared (+) | shared(+++) | invalid | 2183 * | | | | | | 2184 * |non-shared| shared (+*) | private | slave (*) | unbindable | 2185 * *************************************************************************** 2186 * 2187 * (+) the mount is moved to the destination. And is then propagated to 2188 * all the mounts in the propagation tree of the destination mount. 2189 * (+*) the mount is moved to the destination. 2190 * (+++) the mount is moved to the destination and is then propagated to 2191 * all the mounts belonging to the destination mount's propagation tree. 2192 * the mount is marked as 'shared and slave'. 2193 * (*) the mount continues to be a slave at the new location. 2194 * 2195 * if the source mount is a tree, the operations explained above is 2196 * applied to each mount in the tree. 2197 * Must be called without spinlocks held, since this function can sleep 2198 * in allocations. 2199 */ 2200 static int attach_recursive_mnt(struct mount *source_mnt, 2201 struct mount *dest_mnt, 2202 struct mountpoint *dest_mp, 2203 bool moving) 2204 { 2205 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2206 HLIST_HEAD(tree_list); 2207 struct mnt_namespace *ns = dest_mnt->mnt_ns; 2208 struct mountpoint *smp; 2209 struct mount *child, *p; 2210 struct hlist_node *n; 2211 int err; 2212 2213 /* Preallocate a mountpoint in case the new mounts need 2214 * to be tucked under other mounts. 2215 */ 2216 smp = get_mountpoint(source_mnt->mnt.mnt_root); 2217 if (IS_ERR(smp)) 2218 return PTR_ERR(smp); 2219 2220 /* Is there space to add these mounts to the mount namespace? */ 2221 if (!moving) { 2222 err = count_mounts(ns, source_mnt); 2223 if (err) 2224 goto out; 2225 } 2226 2227 if (IS_MNT_SHARED(dest_mnt)) { 2228 err = invent_group_ids(source_mnt, true); 2229 if (err) 2230 goto out; 2231 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 2232 lock_mount_hash(); 2233 if (err) 2234 goto out_cleanup_ids; 2235 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 2236 set_mnt_shared(p); 2237 } else { 2238 lock_mount_hash(); 2239 } 2240 if (moving) { 2241 unhash_mnt(source_mnt); 2242 attach_mnt(source_mnt, dest_mnt, dest_mp); 2243 touch_mnt_namespace(source_mnt->mnt_ns); 2244 } else { 2245 if (source_mnt->mnt_ns) { 2246 /* move from anon - the caller will destroy */ 2247 list_del_init(&source_mnt->mnt_ns->list); 2248 } 2249 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 2250 commit_tree(source_mnt); 2251 } 2252 2253 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { 2254 struct mount *q; 2255 hlist_del_init(&child->mnt_hash); 2256 q = __lookup_mnt(&child->mnt_parent->mnt, 2257 child->mnt_mountpoint); 2258 if (q) 2259 mnt_change_mountpoint(child, smp, q); 2260 /* Notice when we are propagating across user namespaces */ 2261 if (child->mnt_parent->mnt_ns->user_ns != user_ns) 2262 lock_mnt_tree(child); 2263 child->mnt.mnt_flags &= ~MNT_LOCKED; 2264 commit_tree(child); 2265 } 2266 put_mountpoint(smp); 2267 unlock_mount_hash(); 2268 2269 return 0; 2270 2271 out_cleanup_ids: 2272 while (!hlist_empty(&tree_list)) { 2273 child = hlist_entry(tree_list.first, struct mount, mnt_hash); 2274 child->mnt_parent->mnt_ns->pending_mounts = 0; 2275 umount_tree(child, UMOUNT_SYNC); 2276 } 2277 unlock_mount_hash(); 2278 cleanup_group_ids(source_mnt, NULL); 2279 out: 2280 ns->pending_mounts = 0; 2281 2282 read_seqlock_excl(&mount_lock); 2283 put_mountpoint(smp); 2284 read_sequnlock_excl(&mount_lock); 2285 2286 return err; 2287 } 2288 2289 static struct mountpoint *lock_mount(struct path *path) 2290 { 2291 struct vfsmount *mnt; 2292 struct dentry *dentry = path->dentry; 2293 retry: 2294 inode_lock(dentry->d_inode); 2295 if (unlikely(cant_mount(dentry))) { 2296 inode_unlock(dentry->d_inode); 2297 return ERR_PTR(-ENOENT); 2298 } 2299 namespace_lock(); 2300 mnt = lookup_mnt(path); 2301 if (likely(!mnt)) { 2302 struct mountpoint *mp = get_mountpoint(dentry); 2303 if (IS_ERR(mp)) { 2304 namespace_unlock(); 2305 inode_unlock(dentry->d_inode); 2306 return mp; 2307 } 2308 return mp; 2309 } 2310 namespace_unlock(); 2311 inode_unlock(path->dentry->d_inode); 2312 path_put(path); 2313 path->mnt = mnt; 2314 dentry = path->dentry = dget(mnt->mnt_root); 2315 goto retry; 2316 } 2317 2318 static void unlock_mount(struct mountpoint *where) 2319 { 2320 struct dentry *dentry = where->m_dentry; 2321 2322 read_seqlock_excl(&mount_lock); 2323 put_mountpoint(where); 2324 read_sequnlock_excl(&mount_lock); 2325 2326 namespace_unlock(); 2327 inode_unlock(dentry->d_inode); 2328 } 2329 2330 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) 2331 { 2332 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) 2333 return -EINVAL; 2334 2335 if (d_is_dir(mp->m_dentry) != 2336 d_is_dir(mnt->mnt.mnt_root)) 2337 return -ENOTDIR; 2338 2339 return attach_recursive_mnt(mnt, p, mp, false); 2340 } 2341 2342 /* 2343 * Sanity check the flags to change_mnt_propagation. 2344 */ 2345 2346 static int flags_to_propagation_type(int ms_flags) 2347 { 2348 int type = ms_flags & ~(MS_REC | MS_SILENT); 2349 2350 /* Fail if any non-propagation flags are set */ 2351 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 2352 return 0; 2353 /* Only one propagation flag should be set */ 2354 if (!is_power_of_2(type)) 2355 return 0; 2356 return type; 2357 } 2358 2359 /* 2360 * recursively change the type of the mountpoint. 2361 */ 2362 static int do_change_type(struct path *path, int ms_flags) 2363 { 2364 struct mount *m; 2365 struct mount *mnt = real_mount(path->mnt); 2366 int recurse = ms_flags & MS_REC; 2367 int type; 2368 int err = 0; 2369 2370 if (path->dentry != path->mnt->mnt_root) 2371 return -EINVAL; 2372 2373 type = flags_to_propagation_type(ms_flags); 2374 if (!type) 2375 return -EINVAL; 2376 2377 namespace_lock(); 2378 if (type == MS_SHARED) { 2379 err = invent_group_ids(mnt, recurse); 2380 if (err) 2381 goto out_unlock; 2382 } 2383 2384 lock_mount_hash(); 2385 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 2386 change_mnt_propagation(m, type); 2387 unlock_mount_hash(); 2388 2389 out_unlock: 2390 namespace_unlock(); 2391 return err; 2392 } 2393 2394 static struct mount *__do_loopback(struct path *old_path, int recurse) 2395 { 2396 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); 2397 2398 if (IS_MNT_UNBINDABLE(old)) 2399 return mnt; 2400 2401 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations) 2402 return mnt; 2403 2404 if (!recurse && has_locked_children(old, old_path->dentry)) 2405 return mnt; 2406 2407 if (recurse) 2408 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); 2409 else 2410 mnt = clone_mnt(old, old_path->dentry, 0); 2411 2412 if (!IS_ERR(mnt)) 2413 mnt->mnt.mnt_flags &= ~MNT_LOCKED; 2414 2415 return mnt; 2416 } 2417 2418 /* 2419 * do loopback mount. 2420 */ 2421 static int do_loopback(struct path *path, const char *old_name, 2422 int recurse) 2423 { 2424 struct path old_path; 2425 struct mount *mnt = NULL, *parent; 2426 struct mountpoint *mp; 2427 int err; 2428 if (!old_name || !*old_name) 2429 return -EINVAL; 2430 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 2431 if (err) 2432 return err; 2433 2434 err = -EINVAL; 2435 if (mnt_ns_loop(old_path.dentry)) 2436 goto out; 2437 2438 mp = lock_mount(path); 2439 if (IS_ERR(mp)) { 2440 err = PTR_ERR(mp); 2441 goto out; 2442 } 2443 2444 parent = real_mount(path->mnt); 2445 if (!check_mnt(parent)) 2446 goto out2; 2447 2448 mnt = __do_loopback(&old_path, recurse); 2449 if (IS_ERR(mnt)) { 2450 err = PTR_ERR(mnt); 2451 goto out2; 2452 } 2453 2454 err = graft_tree(mnt, parent, mp); 2455 if (err) { 2456 lock_mount_hash(); 2457 umount_tree(mnt, UMOUNT_SYNC); 2458 unlock_mount_hash(); 2459 } 2460 out2: 2461 unlock_mount(mp); 2462 out: 2463 path_put(&old_path); 2464 return err; 2465 } 2466 2467 static struct file *open_detached_copy(struct path *path, bool recursive) 2468 { 2469 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 2470 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true); 2471 struct mount *mnt, *p; 2472 struct file *file; 2473 2474 if (IS_ERR(ns)) 2475 return ERR_CAST(ns); 2476 2477 namespace_lock(); 2478 mnt = __do_loopback(path, recursive); 2479 if (IS_ERR(mnt)) { 2480 namespace_unlock(); 2481 free_mnt_ns(ns); 2482 return ERR_CAST(mnt); 2483 } 2484 2485 lock_mount_hash(); 2486 for (p = mnt; p; p = next_mnt(p, mnt)) { 2487 p->mnt_ns = ns; 2488 ns->mounts++; 2489 } 2490 ns->root = mnt; 2491 list_add_tail(&ns->list, &mnt->mnt_list); 2492 mntget(&mnt->mnt); 2493 unlock_mount_hash(); 2494 namespace_unlock(); 2495 2496 mntput(path->mnt); 2497 path->mnt = &mnt->mnt; 2498 file = dentry_open(path, O_PATH, current_cred()); 2499 if (IS_ERR(file)) 2500 dissolve_on_fput(path->mnt); 2501 else 2502 file->f_mode |= FMODE_NEED_UNMOUNT; 2503 return file; 2504 } 2505 2506 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) 2507 { 2508 struct file *file; 2509 struct path path; 2510 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 2511 bool detached = flags & OPEN_TREE_CLONE; 2512 int error; 2513 int fd; 2514 2515 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); 2516 2517 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | 2518 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | 2519 OPEN_TREE_CLOEXEC)) 2520 return -EINVAL; 2521 2522 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) 2523 return -EINVAL; 2524 2525 if (flags & AT_NO_AUTOMOUNT) 2526 lookup_flags &= ~LOOKUP_AUTOMOUNT; 2527 if (flags & AT_SYMLINK_NOFOLLOW) 2528 lookup_flags &= ~LOOKUP_FOLLOW; 2529 if (flags & AT_EMPTY_PATH) 2530 lookup_flags |= LOOKUP_EMPTY; 2531 2532 if (detached && !may_mount()) 2533 return -EPERM; 2534 2535 fd = get_unused_fd_flags(flags & O_CLOEXEC); 2536 if (fd < 0) 2537 return fd; 2538 2539 error = user_path_at(dfd, filename, lookup_flags, &path); 2540 if (unlikely(error)) { 2541 file = ERR_PTR(error); 2542 } else { 2543 if (detached) 2544 file = open_detached_copy(&path, flags & AT_RECURSIVE); 2545 else 2546 file = dentry_open(&path, O_PATH, current_cred()); 2547 path_put(&path); 2548 } 2549 if (IS_ERR(file)) { 2550 put_unused_fd(fd); 2551 return PTR_ERR(file); 2552 } 2553 fd_install(fd, file); 2554 return fd; 2555 } 2556 2557 /* 2558 * Don't allow locked mount flags to be cleared. 2559 * 2560 * No locks need to be held here while testing the various MNT_LOCK 2561 * flags because those flags can never be cleared once they are set. 2562 */ 2563 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) 2564 { 2565 unsigned int fl = mnt->mnt.mnt_flags; 2566 2567 if ((fl & MNT_LOCK_READONLY) && 2568 !(mnt_flags & MNT_READONLY)) 2569 return false; 2570 2571 if ((fl & MNT_LOCK_NODEV) && 2572 !(mnt_flags & MNT_NODEV)) 2573 return false; 2574 2575 if ((fl & MNT_LOCK_NOSUID) && 2576 !(mnt_flags & MNT_NOSUID)) 2577 return false; 2578 2579 if ((fl & MNT_LOCK_NOEXEC) && 2580 !(mnt_flags & MNT_NOEXEC)) 2581 return false; 2582 2583 if ((fl & MNT_LOCK_ATIME) && 2584 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) 2585 return false; 2586 2587 return true; 2588 } 2589 2590 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) 2591 { 2592 bool readonly_request = (mnt_flags & MNT_READONLY); 2593 2594 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) 2595 return 0; 2596 2597 if (readonly_request) 2598 return mnt_make_readonly(mnt); 2599 2600 mnt->mnt.mnt_flags &= ~MNT_READONLY; 2601 return 0; 2602 } 2603 2604 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) 2605 { 2606 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; 2607 mnt->mnt.mnt_flags = mnt_flags; 2608 touch_mnt_namespace(mnt->mnt_ns); 2609 } 2610 2611 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) 2612 { 2613 struct super_block *sb = mnt->mnt_sb; 2614 2615 if (!__mnt_is_readonly(mnt) && 2616 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && 2617 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { 2618 char *buf = (char *)__get_free_page(GFP_KERNEL); 2619 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); 2620 struct tm tm; 2621 2622 time64_to_tm(sb->s_time_max, 0, &tm); 2623 2624 pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n", 2625 sb->s_type->name, 2626 is_mounted(mnt) ? "remounted" : "mounted", 2627 mntpath, 2628 tm.tm_year+1900, (unsigned long long)sb->s_time_max); 2629 2630 free_page((unsigned long)buf); 2631 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; 2632 } 2633 } 2634 2635 /* 2636 * Handle reconfiguration of the mountpoint only without alteration of the 2637 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND 2638 * to mount(2). 2639 */ 2640 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) 2641 { 2642 struct super_block *sb = path->mnt->mnt_sb; 2643 struct mount *mnt = real_mount(path->mnt); 2644 int ret; 2645 2646 if (!check_mnt(mnt)) 2647 return -EINVAL; 2648 2649 if (path->dentry != mnt->mnt.mnt_root) 2650 return -EINVAL; 2651 2652 if (!can_change_locked_flags(mnt, mnt_flags)) 2653 return -EPERM; 2654 2655 /* 2656 * We're only checking whether the superblock is read-only not 2657 * changing it, so only take down_read(&sb->s_umount). 2658 */ 2659 down_read(&sb->s_umount); 2660 lock_mount_hash(); 2661 ret = change_mount_ro_state(mnt, mnt_flags); 2662 if (ret == 0) 2663 set_mount_attributes(mnt, mnt_flags); 2664 unlock_mount_hash(); 2665 up_read(&sb->s_umount); 2666 2667 mnt_warn_timestamp_expiry(path, &mnt->mnt); 2668 2669 return ret; 2670 } 2671 2672 /* 2673 * change filesystem flags. dir should be a physical root of filesystem. 2674 * If you've mounted a non-root directory somewhere and want to do remount 2675 * on it - tough luck. 2676 */ 2677 static int do_remount(struct path *path, int ms_flags, int sb_flags, 2678 int mnt_flags, void *data) 2679 { 2680 int err; 2681 struct super_block *sb = path->mnt->mnt_sb; 2682 struct mount *mnt = real_mount(path->mnt); 2683 struct fs_context *fc; 2684 2685 if (!check_mnt(mnt)) 2686 return -EINVAL; 2687 2688 if (path->dentry != path->mnt->mnt_root) 2689 return -EINVAL; 2690 2691 if (!can_change_locked_flags(mnt, mnt_flags)) 2692 return -EPERM; 2693 2694 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); 2695 if (IS_ERR(fc)) 2696 return PTR_ERR(fc); 2697 2698 fc->oldapi = true; 2699 err = parse_monolithic_mount_data(fc, data); 2700 if (!err) { 2701 down_write(&sb->s_umount); 2702 err = -EPERM; 2703 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { 2704 err = reconfigure_super(fc); 2705 if (!err) { 2706 lock_mount_hash(); 2707 set_mount_attributes(mnt, mnt_flags); 2708 unlock_mount_hash(); 2709 } 2710 } 2711 up_write(&sb->s_umount); 2712 } 2713 2714 mnt_warn_timestamp_expiry(path, &mnt->mnt); 2715 2716 put_fs_context(fc); 2717 return err; 2718 } 2719 2720 static inline int tree_contains_unbindable(struct mount *mnt) 2721 { 2722 struct mount *p; 2723 for (p = mnt; p; p = next_mnt(p, mnt)) { 2724 if (IS_MNT_UNBINDABLE(p)) 2725 return 1; 2726 } 2727 return 0; 2728 } 2729 2730 /* 2731 * Check that there aren't references to earlier/same mount namespaces in the 2732 * specified subtree. Such references can act as pins for mount namespaces 2733 * that aren't checked by the mount-cycle checking code, thereby allowing 2734 * cycles to be made. 2735 */ 2736 static bool check_for_nsfs_mounts(struct mount *subtree) 2737 { 2738 struct mount *p; 2739 bool ret = false; 2740 2741 lock_mount_hash(); 2742 for (p = subtree; p; p = next_mnt(p, subtree)) 2743 if (mnt_ns_loop(p->mnt.mnt_root)) 2744 goto out; 2745 2746 ret = true; 2747 out: 2748 unlock_mount_hash(); 2749 return ret; 2750 } 2751 2752 static int do_set_group(struct path *from_path, struct path *to_path) 2753 { 2754 struct mount *from, *to; 2755 int err; 2756 2757 from = real_mount(from_path->mnt); 2758 to = real_mount(to_path->mnt); 2759 2760 namespace_lock(); 2761 2762 err = -EINVAL; 2763 /* To and From must be mounted */ 2764 if (!is_mounted(&from->mnt)) 2765 goto out; 2766 if (!is_mounted(&to->mnt)) 2767 goto out; 2768 2769 err = -EPERM; 2770 /* We should be allowed to modify mount namespaces of both mounts */ 2771 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2772 goto out; 2773 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) 2774 goto out; 2775 2776 err = -EINVAL; 2777 /* To and From paths should be mount roots */ 2778 if (from_path->dentry != from_path->mnt->mnt_root) 2779 goto out; 2780 if (to_path->dentry != to_path->mnt->mnt_root) 2781 goto out; 2782 2783 /* Setting sharing groups is only allowed across same superblock */ 2784 if (from->mnt.mnt_sb != to->mnt.mnt_sb) 2785 goto out; 2786 2787 /* From mount root should be wider than To mount root */ 2788 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) 2789 goto out; 2790 2791 /* From mount should not have locked children in place of To's root */ 2792 if (has_locked_children(from, to->mnt.mnt_root)) 2793 goto out; 2794 2795 /* Setting sharing groups is only allowed on private mounts */ 2796 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) 2797 goto out; 2798 2799 /* From should not be private */ 2800 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) 2801 goto out; 2802 2803 if (IS_MNT_SLAVE(from)) { 2804 struct mount *m = from->mnt_master; 2805 2806 list_add(&to->mnt_slave, &m->mnt_slave_list); 2807 to->mnt_master = m; 2808 } 2809 2810 if (IS_MNT_SHARED(from)) { 2811 to->mnt_group_id = from->mnt_group_id; 2812 list_add(&to->mnt_share, &from->mnt_share); 2813 lock_mount_hash(); 2814 set_mnt_shared(to); 2815 unlock_mount_hash(); 2816 } 2817 2818 err = 0; 2819 out: 2820 namespace_unlock(); 2821 return err; 2822 } 2823 2824 static int do_move_mount(struct path *old_path, struct path *new_path) 2825 { 2826 struct mnt_namespace *ns; 2827 struct mount *p; 2828 struct mount *old; 2829 struct mount *parent; 2830 struct mountpoint *mp, *old_mp; 2831 int err; 2832 bool attached; 2833 2834 mp = lock_mount(new_path); 2835 if (IS_ERR(mp)) 2836 return PTR_ERR(mp); 2837 2838 old = real_mount(old_path->mnt); 2839 p = real_mount(new_path->mnt); 2840 parent = old->mnt_parent; 2841 attached = mnt_has_parent(old); 2842 old_mp = old->mnt_mp; 2843 ns = old->mnt_ns; 2844 2845 err = -EINVAL; 2846 /* The mountpoint must be in our namespace. */ 2847 if (!check_mnt(p)) 2848 goto out; 2849 2850 /* The thing moved must be mounted... */ 2851 if (!is_mounted(&old->mnt)) 2852 goto out; 2853 2854 /* ... and either ours or the root of anon namespace */ 2855 if (!(attached ? check_mnt(old) : is_anon_ns(ns))) 2856 goto out; 2857 2858 if (old->mnt.mnt_flags & MNT_LOCKED) 2859 goto out; 2860 2861 if (old_path->dentry != old_path->mnt->mnt_root) 2862 goto out; 2863 2864 if (d_is_dir(new_path->dentry) != 2865 d_is_dir(old_path->dentry)) 2866 goto out; 2867 /* 2868 * Don't move a mount residing in a shared parent. 2869 */ 2870 if (attached && IS_MNT_SHARED(parent)) 2871 goto out; 2872 /* 2873 * Don't move a mount tree containing unbindable mounts to a destination 2874 * mount which is shared. 2875 */ 2876 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) 2877 goto out; 2878 err = -ELOOP; 2879 if (!check_for_nsfs_mounts(old)) 2880 goto out; 2881 for (; mnt_has_parent(p); p = p->mnt_parent) 2882 if (p == old) 2883 goto out; 2884 2885 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, 2886 attached); 2887 if (err) 2888 goto out; 2889 2890 /* if the mount is moved, it should no longer be expire 2891 * automatically */ 2892 list_del_init(&old->mnt_expire); 2893 if (attached) 2894 put_mountpoint(old_mp); 2895 out: 2896 unlock_mount(mp); 2897 if (!err) { 2898 if (attached) 2899 mntput_no_expire(parent); 2900 else 2901 free_mnt_ns(ns); 2902 } 2903 return err; 2904 } 2905 2906 static int do_move_mount_old(struct path *path, const char *old_name) 2907 { 2908 struct path old_path; 2909 int err; 2910 2911 if (!old_name || !*old_name) 2912 return -EINVAL; 2913 2914 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 2915 if (err) 2916 return err; 2917 2918 err = do_move_mount(&old_path, path); 2919 path_put(&old_path); 2920 return err; 2921 } 2922 2923 /* 2924 * add a mount into a namespace's mount tree 2925 */ 2926 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, 2927 const struct path *path, int mnt_flags) 2928 { 2929 struct mount *parent = real_mount(path->mnt); 2930 2931 mnt_flags &= ~MNT_INTERNAL_FLAGS; 2932 2933 if (unlikely(!check_mnt(parent))) { 2934 /* that's acceptable only for automounts done in private ns */ 2935 if (!(mnt_flags & MNT_SHRINKABLE)) 2936 return -EINVAL; 2937 /* ... and for those we'd better have mountpoint still alive */ 2938 if (!parent->mnt_ns) 2939 return -EINVAL; 2940 } 2941 2942 /* Refuse the same filesystem on the same mount point */ 2943 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && 2944 path->mnt->mnt_root == path->dentry) 2945 return -EBUSY; 2946 2947 if (d_is_symlink(newmnt->mnt.mnt_root)) 2948 return -EINVAL; 2949 2950 newmnt->mnt.mnt_flags = mnt_flags; 2951 return graft_tree(newmnt, parent, mp); 2952 } 2953 2954 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); 2955 2956 /* 2957 * Create a new mount using a superblock configuration and request it 2958 * be added to the namespace tree. 2959 */ 2960 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, 2961 unsigned int mnt_flags) 2962 { 2963 struct vfsmount *mnt; 2964 struct mountpoint *mp; 2965 struct super_block *sb = fc->root->d_sb; 2966 int error; 2967 2968 error = security_sb_kern_mount(sb); 2969 if (!error && mount_too_revealing(sb, &mnt_flags)) 2970 error = -EPERM; 2971 2972 if (unlikely(error)) { 2973 fc_drop_locked(fc); 2974 return error; 2975 } 2976 2977 up_write(&sb->s_umount); 2978 2979 mnt = vfs_create_mount(fc); 2980 if (IS_ERR(mnt)) 2981 return PTR_ERR(mnt); 2982 2983 mnt_warn_timestamp_expiry(mountpoint, mnt); 2984 2985 mp = lock_mount(mountpoint); 2986 if (IS_ERR(mp)) { 2987 mntput(mnt); 2988 return PTR_ERR(mp); 2989 } 2990 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags); 2991 unlock_mount(mp); 2992 if (error < 0) 2993 mntput(mnt); 2994 return error; 2995 } 2996 2997 /* 2998 * create a new mount for userspace and request it to be added into the 2999 * namespace's tree 3000 */ 3001 static int do_new_mount(struct path *path, const char *fstype, int sb_flags, 3002 int mnt_flags, const char *name, void *data) 3003 { 3004 struct file_system_type *type; 3005 struct fs_context *fc; 3006 const char *subtype = NULL; 3007 int err = 0; 3008 3009 if (!fstype) 3010 return -EINVAL; 3011 3012 type = get_fs_type(fstype); 3013 if (!type) 3014 return -ENODEV; 3015 3016 if (type->fs_flags & FS_HAS_SUBTYPE) { 3017 subtype = strchr(fstype, '.'); 3018 if (subtype) { 3019 subtype++; 3020 if (!*subtype) { 3021 put_filesystem(type); 3022 return -EINVAL; 3023 } 3024 } 3025 } 3026 3027 fc = fs_context_for_mount(type, sb_flags); 3028 put_filesystem(type); 3029 if (IS_ERR(fc)) 3030 return PTR_ERR(fc); 3031 3032 if (subtype) 3033 err = vfs_parse_fs_string(fc, "subtype", 3034 subtype, strlen(subtype)); 3035 if (!err && name) 3036 err = vfs_parse_fs_string(fc, "source", name, strlen(name)); 3037 if (!err) 3038 err = parse_monolithic_mount_data(fc, data); 3039 if (!err && !mount_capable(fc)) 3040 err = -EPERM; 3041 if (!err) 3042 err = vfs_get_tree(fc); 3043 if (!err) 3044 err = do_new_mount_fc(fc, path, mnt_flags); 3045 3046 put_fs_context(fc); 3047 return err; 3048 } 3049 3050 int finish_automount(struct vfsmount *m, const struct path *path) 3051 { 3052 struct dentry *dentry = path->dentry; 3053 struct mountpoint *mp; 3054 struct mount *mnt; 3055 int err; 3056 3057 if (!m) 3058 return 0; 3059 if (IS_ERR(m)) 3060 return PTR_ERR(m); 3061 3062 mnt = real_mount(m); 3063 /* The new mount record should have at least 2 refs to prevent it being 3064 * expired before we get a chance to add it 3065 */ 3066 BUG_ON(mnt_get_count(mnt) < 2); 3067 3068 if (m->mnt_sb == path->mnt->mnt_sb && 3069 m->mnt_root == dentry) { 3070 err = -ELOOP; 3071 goto discard; 3072 } 3073 3074 /* 3075 * we don't want to use lock_mount() - in this case finding something 3076 * that overmounts our mountpoint to be means "quitely drop what we've 3077 * got", not "try to mount it on top". 3078 */ 3079 inode_lock(dentry->d_inode); 3080 namespace_lock(); 3081 if (unlikely(cant_mount(dentry))) { 3082 err = -ENOENT; 3083 goto discard_locked; 3084 } 3085 rcu_read_lock(); 3086 if (unlikely(__lookup_mnt(path->mnt, dentry))) { 3087 rcu_read_unlock(); 3088 err = 0; 3089 goto discard_locked; 3090 } 3091 rcu_read_unlock(); 3092 mp = get_mountpoint(dentry); 3093 if (IS_ERR(mp)) { 3094 err = PTR_ERR(mp); 3095 goto discard_locked; 3096 } 3097 3098 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE); 3099 unlock_mount(mp); 3100 if (unlikely(err)) 3101 goto discard; 3102 mntput(m); 3103 return 0; 3104 3105 discard_locked: 3106 namespace_unlock(); 3107 inode_unlock(dentry->d_inode); 3108 discard: 3109 /* remove m from any expiration list it may be on */ 3110 if (!list_empty(&mnt->mnt_expire)) { 3111 namespace_lock(); 3112 list_del_init(&mnt->mnt_expire); 3113 namespace_unlock(); 3114 } 3115 mntput(m); 3116 mntput(m); 3117 return err; 3118 } 3119 3120 /** 3121 * mnt_set_expiry - Put a mount on an expiration list 3122 * @mnt: The mount to list. 3123 * @expiry_list: The list to add the mount to. 3124 */ 3125 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) 3126 { 3127 namespace_lock(); 3128 3129 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); 3130 3131 namespace_unlock(); 3132 } 3133 EXPORT_SYMBOL(mnt_set_expiry); 3134 3135 /* 3136 * process a list of expirable mountpoints with the intent of discarding any 3137 * mountpoints that aren't in use and haven't been touched since last we came 3138 * here 3139 */ 3140 void mark_mounts_for_expiry(struct list_head *mounts) 3141 { 3142 struct mount *mnt, *next; 3143 LIST_HEAD(graveyard); 3144 3145 if (list_empty(mounts)) 3146 return; 3147 3148 namespace_lock(); 3149 lock_mount_hash(); 3150 3151 /* extract from the expiration list every vfsmount that matches the 3152 * following criteria: 3153 * - only referenced by its parent vfsmount 3154 * - still marked for expiry (marked on the last call here; marks are 3155 * cleared by mntput()) 3156 */ 3157 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { 3158 if (!xchg(&mnt->mnt_expiry_mark, 1) || 3159 propagate_mount_busy(mnt, 1)) 3160 continue; 3161 list_move(&mnt->mnt_expire, &graveyard); 3162 } 3163 while (!list_empty(&graveyard)) { 3164 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); 3165 touch_mnt_namespace(mnt->mnt_ns); 3166 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3167 } 3168 unlock_mount_hash(); 3169 namespace_unlock(); 3170 } 3171 3172 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); 3173 3174 /* 3175 * Ripoff of 'select_parent()' 3176 * 3177 * search the list of submounts for a given mountpoint, and move any 3178 * shrinkable submounts to the 'graveyard' list. 3179 */ 3180 static int select_submounts(struct mount *parent, struct list_head *graveyard) 3181 { 3182 struct mount *this_parent = parent; 3183 struct list_head *next; 3184 int found = 0; 3185 3186 repeat: 3187 next = this_parent->mnt_mounts.next; 3188 resume: 3189 while (next != &this_parent->mnt_mounts) { 3190 struct list_head *tmp = next; 3191 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); 3192 3193 next = tmp->next; 3194 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) 3195 continue; 3196 /* 3197 * Descend a level if the d_mounts list is non-empty. 3198 */ 3199 if (!list_empty(&mnt->mnt_mounts)) { 3200 this_parent = mnt; 3201 goto repeat; 3202 } 3203 3204 if (!propagate_mount_busy(mnt, 1)) { 3205 list_move_tail(&mnt->mnt_expire, graveyard); 3206 found++; 3207 } 3208 } 3209 /* 3210 * All done at this level ... ascend and resume the search 3211 */ 3212 if (this_parent != parent) { 3213 next = this_parent->mnt_child.next; 3214 this_parent = this_parent->mnt_parent; 3215 goto resume; 3216 } 3217 return found; 3218 } 3219 3220 /* 3221 * process a list of expirable mountpoints with the intent of discarding any 3222 * submounts of a specific parent mountpoint 3223 * 3224 * mount_lock must be held for write 3225 */ 3226 static void shrink_submounts(struct mount *mnt) 3227 { 3228 LIST_HEAD(graveyard); 3229 struct mount *m; 3230 3231 /* extract submounts of 'mountpoint' from the expiration list */ 3232 while (select_submounts(mnt, &graveyard)) { 3233 while (!list_empty(&graveyard)) { 3234 m = list_first_entry(&graveyard, struct mount, 3235 mnt_expire); 3236 touch_mnt_namespace(m->mnt_ns); 3237 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); 3238 } 3239 } 3240 } 3241 3242 static void *copy_mount_options(const void __user * data) 3243 { 3244 char *copy; 3245 unsigned left, offset; 3246 3247 if (!data) 3248 return NULL; 3249 3250 copy = kmalloc(PAGE_SIZE, GFP_KERNEL); 3251 if (!copy) 3252 return ERR_PTR(-ENOMEM); 3253 3254 left = copy_from_user(copy, data, PAGE_SIZE); 3255 3256 /* 3257 * Not all architectures have an exact copy_from_user(). Resort to 3258 * byte at a time. 3259 */ 3260 offset = PAGE_SIZE - left; 3261 while (left) { 3262 char c; 3263 if (get_user(c, (const char __user *)data + offset)) 3264 break; 3265 copy[offset] = c; 3266 left--; 3267 offset++; 3268 } 3269 3270 if (left == PAGE_SIZE) { 3271 kfree(copy); 3272 return ERR_PTR(-EFAULT); 3273 } 3274 3275 return copy; 3276 } 3277 3278 static char *copy_mount_string(const void __user *data) 3279 { 3280 return data ? strndup_user(data, PATH_MAX) : NULL; 3281 } 3282 3283 /* 3284 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to 3285 * be given to the mount() call (ie: read-only, no-dev, no-suid etc). 3286 * 3287 * data is a (void *) that can point to any structure up to 3288 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent 3289 * information (or be NULL). 3290 * 3291 * Pre-0.97 versions of mount() didn't have a flags word. 3292 * When the flags word was introduced its top half was required 3293 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9. 3294 * Therefore, if this magic number is present, it carries no information 3295 * and must be discarded. 3296 */ 3297 int path_mount(const char *dev_name, struct path *path, 3298 const char *type_page, unsigned long flags, void *data_page) 3299 { 3300 unsigned int mnt_flags = 0, sb_flags; 3301 int ret; 3302 3303 /* Discard magic */ 3304 if ((flags & MS_MGC_MSK) == MS_MGC_VAL) 3305 flags &= ~MS_MGC_MSK; 3306 3307 /* Basic sanity checks */ 3308 if (data_page) 3309 ((char *)data_page)[PAGE_SIZE - 1] = 0; 3310 3311 if (flags & MS_NOUSER) 3312 return -EINVAL; 3313 3314 ret = security_sb_mount(dev_name, path, type_page, flags, data_page); 3315 if (ret) 3316 return ret; 3317 if (!may_mount()) 3318 return -EPERM; 3319 if (flags & SB_MANDLOCK) 3320 warn_mandlock(); 3321 3322 /* Default to relatime unless overriden */ 3323 if (!(flags & MS_NOATIME)) 3324 mnt_flags |= MNT_RELATIME; 3325 3326 /* Separate the per-mountpoint flags */ 3327 if (flags & MS_NOSUID) 3328 mnt_flags |= MNT_NOSUID; 3329 if (flags & MS_NODEV) 3330 mnt_flags |= MNT_NODEV; 3331 if (flags & MS_NOEXEC) 3332 mnt_flags |= MNT_NOEXEC; 3333 if (flags & MS_NOATIME) 3334 mnt_flags |= MNT_NOATIME; 3335 if (flags & MS_NODIRATIME) 3336 mnt_flags |= MNT_NODIRATIME; 3337 if (flags & MS_STRICTATIME) 3338 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 3339 if (flags & MS_RDONLY) 3340 mnt_flags |= MNT_READONLY; 3341 if (flags & MS_NOSYMFOLLOW) 3342 mnt_flags |= MNT_NOSYMFOLLOW; 3343 3344 /* The default atime for remount is preservation */ 3345 if ((flags & MS_REMOUNT) && 3346 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | 3347 MS_STRICTATIME)) == 0)) { 3348 mnt_flags &= ~MNT_ATIME_MASK; 3349 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; 3350 } 3351 3352 sb_flags = flags & (SB_RDONLY | 3353 SB_SYNCHRONOUS | 3354 SB_MANDLOCK | 3355 SB_DIRSYNC | 3356 SB_SILENT | 3357 SB_POSIXACL | 3358 SB_LAZYTIME | 3359 SB_I_VERSION); 3360 3361 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) 3362 return do_reconfigure_mnt(path, mnt_flags); 3363 if (flags & MS_REMOUNT) 3364 return do_remount(path, flags, sb_flags, mnt_flags, data_page); 3365 if (flags & MS_BIND) 3366 return do_loopback(path, dev_name, flags & MS_REC); 3367 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 3368 return do_change_type(path, flags); 3369 if (flags & MS_MOVE) 3370 return do_move_mount_old(path, dev_name); 3371 3372 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name, 3373 data_page); 3374 } 3375 3376 long do_mount(const char *dev_name, const char __user *dir_name, 3377 const char *type_page, unsigned long flags, void *data_page) 3378 { 3379 struct path path; 3380 int ret; 3381 3382 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); 3383 if (ret) 3384 return ret; 3385 ret = path_mount(dev_name, &path, type_page, flags, data_page); 3386 path_put(&path); 3387 return ret; 3388 } 3389 3390 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) 3391 { 3392 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES); 3393 } 3394 3395 static void dec_mnt_namespaces(struct ucounts *ucounts) 3396 { 3397 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES); 3398 } 3399 3400 static void free_mnt_ns(struct mnt_namespace *ns) 3401 { 3402 if (!is_anon_ns(ns)) 3403 ns_free_inum(&ns->ns); 3404 dec_mnt_namespaces(ns->ucounts); 3405 put_user_ns(ns->user_ns); 3406 kfree(ns); 3407 } 3408 3409 /* 3410 * Assign a sequence number so we can detect when we attempt to bind 3411 * mount a reference to an older mount namespace into the current 3412 * mount namespace, preventing reference counting loops. A 64bit 3413 * number incrementing at 10Ghz will take 12,427 years to wrap which 3414 * is effectively never, so we can ignore the possibility. 3415 */ 3416 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1); 3417 3418 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon) 3419 { 3420 struct mnt_namespace *new_ns; 3421 struct ucounts *ucounts; 3422 int ret; 3423 3424 ucounts = inc_mnt_namespaces(user_ns); 3425 if (!ucounts) 3426 return ERR_PTR(-ENOSPC); 3427 3428 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT); 3429 if (!new_ns) { 3430 dec_mnt_namespaces(ucounts); 3431 return ERR_PTR(-ENOMEM); 3432 } 3433 if (!anon) { 3434 ret = ns_alloc_inum(&new_ns->ns); 3435 if (ret) { 3436 kfree(new_ns); 3437 dec_mnt_namespaces(ucounts); 3438 return ERR_PTR(ret); 3439 } 3440 } 3441 new_ns->ns.ops = &mntns_operations; 3442 if (!anon) 3443 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq); 3444 refcount_set(&new_ns->ns.count, 1); 3445 INIT_LIST_HEAD(&new_ns->list); 3446 init_waitqueue_head(&new_ns->poll); 3447 spin_lock_init(&new_ns->ns_lock); 3448 new_ns->user_ns = get_user_ns(user_ns); 3449 new_ns->ucounts = ucounts; 3450 return new_ns; 3451 } 3452 3453 __latent_entropy 3454 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, 3455 struct user_namespace *user_ns, struct fs_struct *new_fs) 3456 { 3457 struct mnt_namespace *new_ns; 3458 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; 3459 struct mount *p, *q; 3460 struct mount *old; 3461 struct mount *new; 3462 int copy_flags; 3463 3464 BUG_ON(!ns); 3465 3466 if (likely(!(flags & CLONE_NEWNS))) { 3467 get_mnt_ns(ns); 3468 return ns; 3469 } 3470 3471 old = ns->root; 3472 3473 new_ns = alloc_mnt_ns(user_ns, false); 3474 if (IS_ERR(new_ns)) 3475 return new_ns; 3476 3477 namespace_lock(); 3478 /* First pass: copy the tree topology */ 3479 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; 3480 if (user_ns != ns->user_ns) 3481 copy_flags |= CL_SHARED_TO_SLAVE; 3482 new = copy_tree(old, old->mnt.mnt_root, copy_flags); 3483 if (IS_ERR(new)) { 3484 namespace_unlock(); 3485 free_mnt_ns(new_ns); 3486 return ERR_CAST(new); 3487 } 3488 if (user_ns != ns->user_ns) { 3489 lock_mount_hash(); 3490 lock_mnt_tree(new); 3491 unlock_mount_hash(); 3492 } 3493 new_ns->root = new; 3494 list_add_tail(&new_ns->list, &new->mnt_list); 3495 3496 /* 3497 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts 3498 * as belonging to new namespace. We have already acquired a private 3499 * fs_struct, so tsk->fs->lock is not needed. 3500 */ 3501 p = old; 3502 q = new; 3503 while (p) { 3504 q->mnt_ns = new_ns; 3505 new_ns->mounts++; 3506 if (new_fs) { 3507 if (&p->mnt == new_fs->root.mnt) { 3508 new_fs->root.mnt = mntget(&q->mnt); 3509 rootmnt = &p->mnt; 3510 } 3511 if (&p->mnt == new_fs->pwd.mnt) { 3512 new_fs->pwd.mnt = mntget(&q->mnt); 3513 pwdmnt = &p->mnt; 3514 } 3515 } 3516 p = next_mnt(p, old); 3517 q = next_mnt(q, new); 3518 if (!q) 3519 break; 3520 // an mntns binding we'd skipped? 3521 while (p->mnt.mnt_root != q->mnt.mnt_root) 3522 p = next_mnt(skip_mnt_tree(p), old); 3523 } 3524 namespace_unlock(); 3525 3526 if (rootmnt) 3527 mntput(rootmnt); 3528 if (pwdmnt) 3529 mntput(pwdmnt); 3530 3531 return new_ns; 3532 } 3533 3534 struct dentry *mount_subtree(struct vfsmount *m, const char *name) 3535 { 3536 struct mount *mnt = real_mount(m); 3537 struct mnt_namespace *ns; 3538 struct super_block *s; 3539 struct path path; 3540 int err; 3541 3542 ns = alloc_mnt_ns(&init_user_ns, true); 3543 if (IS_ERR(ns)) { 3544 mntput(m); 3545 return ERR_CAST(ns); 3546 } 3547 mnt->mnt_ns = ns; 3548 ns->root = mnt; 3549 ns->mounts++; 3550 list_add(&mnt->mnt_list, &ns->list); 3551 3552 err = vfs_path_lookup(m->mnt_root, m, 3553 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 3554 3555 put_mnt_ns(ns); 3556 3557 if (err) 3558 return ERR_PTR(err); 3559 3560 /* trade a vfsmount reference for active sb one */ 3561 s = path.mnt->mnt_sb; 3562 atomic_inc(&s->s_active); 3563 mntput(path.mnt); 3564 /* lock the sucker */ 3565 down_write(&s->s_umount); 3566 /* ... and return the root of (sub)tree on it */ 3567 return path.dentry; 3568 } 3569 EXPORT_SYMBOL(mount_subtree); 3570 3571 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name, 3572 char __user *, type, unsigned long, flags, void __user *, data) 3573 { 3574 int ret; 3575 char *kernel_type; 3576 char *kernel_dev; 3577 void *options; 3578 3579 kernel_type = copy_mount_string(type); 3580 ret = PTR_ERR(kernel_type); 3581 if (IS_ERR(kernel_type)) 3582 goto out_type; 3583 3584 kernel_dev = copy_mount_string(dev_name); 3585 ret = PTR_ERR(kernel_dev); 3586 if (IS_ERR(kernel_dev)) 3587 goto out_dev; 3588 3589 options = copy_mount_options(data); 3590 ret = PTR_ERR(options); 3591 if (IS_ERR(options)) 3592 goto out_data; 3593 3594 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options); 3595 3596 kfree(options); 3597 out_data: 3598 kfree(kernel_dev); 3599 out_dev: 3600 kfree(kernel_type); 3601 out_type: 3602 return ret; 3603 } 3604 3605 #define FSMOUNT_VALID_FLAGS \ 3606 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \ 3607 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \ 3608 MOUNT_ATTR_NOSYMFOLLOW) 3609 3610 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP) 3611 3612 #define MOUNT_SETATTR_PROPAGATION_FLAGS \ 3613 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED) 3614 3615 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags) 3616 { 3617 unsigned int mnt_flags = 0; 3618 3619 if (attr_flags & MOUNT_ATTR_RDONLY) 3620 mnt_flags |= MNT_READONLY; 3621 if (attr_flags & MOUNT_ATTR_NOSUID) 3622 mnt_flags |= MNT_NOSUID; 3623 if (attr_flags & MOUNT_ATTR_NODEV) 3624 mnt_flags |= MNT_NODEV; 3625 if (attr_flags & MOUNT_ATTR_NOEXEC) 3626 mnt_flags |= MNT_NOEXEC; 3627 if (attr_flags & MOUNT_ATTR_NODIRATIME) 3628 mnt_flags |= MNT_NODIRATIME; 3629 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW) 3630 mnt_flags |= MNT_NOSYMFOLLOW; 3631 3632 return mnt_flags; 3633 } 3634 3635 /* 3636 * Create a kernel mount representation for a new, prepared superblock 3637 * (specified by fs_fd) and attach to an open_tree-like file descriptor. 3638 */ 3639 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags, 3640 unsigned int, attr_flags) 3641 { 3642 struct mnt_namespace *ns; 3643 struct fs_context *fc; 3644 struct file *file; 3645 struct path newmount; 3646 struct mount *mnt; 3647 struct fd f; 3648 unsigned int mnt_flags = 0; 3649 long ret; 3650 3651 if (!may_mount()) 3652 return -EPERM; 3653 3654 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0) 3655 return -EINVAL; 3656 3657 if (attr_flags & ~FSMOUNT_VALID_FLAGS) 3658 return -EINVAL; 3659 3660 mnt_flags = attr_flags_to_mnt_flags(attr_flags); 3661 3662 switch (attr_flags & MOUNT_ATTR__ATIME) { 3663 case MOUNT_ATTR_STRICTATIME: 3664 break; 3665 case MOUNT_ATTR_NOATIME: 3666 mnt_flags |= MNT_NOATIME; 3667 break; 3668 case MOUNT_ATTR_RELATIME: 3669 mnt_flags |= MNT_RELATIME; 3670 break; 3671 default: 3672 return -EINVAL; 3673 } 3674 3675 f = fdget(fs_fd); 3676 if (!f.file) 3677 return -EBADF; 3678 3679 ret = -EINVAL; 3680 if (f.file->f_op != &fscontext_fops) 3681 goto err_fsfd; 3682 3683 fc = f.file->private_data; 3684 3685 ret = mutex_lock_interruptible(&fc->uapi_mutex); 3686 if (ret < 0) 3687 goto err_fsfd; 3688 3689 /* There must be a valid superblock or we can't mount it */ 3690 ret = -EINVAL; 3691 if (!fc->root) 3692 goto err_unlock; 3693 3694 ret = -EPERM; 3695 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) { 3696 pr_warn("VFS: Mount too revealing\n"); 3697 goto err_unlock; 3698 } 3699 3700 ret = -EBUSY; 3701 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT) 3702 goto err_unlock; 3703 3704 if (fc->sb_flags & SB_MANDLOCK) 3705 warn_mandlock(); 3706 3707 newmount.mnt = vfs_create_mount(fc); 3708 if (IS_ERR(newmount.mnt)) { 3709 ret = PTR_ERR(newmount.mnt); 3710 goto err_unlock; 3711 } 3712 newmount.dentry = dget(fc->root); 3713 newmount.mnt->mnt_flags = mnt_flags; 3714 3715 /* We've done the mount bit - now move the file context into more or 3716 * less the same state as if we'd done an fspick(). We don't want to 3717 * do any memory allocation or anything like that at this point as we 3718 * don't want to have to handle any errors incurred. 3719 */ 3720 vfs_clean_context(fc); 3721 3722 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); 3723 if (IS_ERR(ns)) { 3724 ret = PTR_ERR(ns); 3725 goto err_path; 3726 } 3727 mnt = real_mount(newmount.mnt); 3728 mnt->mnt_ns = ns; 3729 ns->root = mnt; 3730 ns->mounts = 1; 3731 list_add(&mnt->mnt_list, &ns->list); 3732 mntget(newmount.mnt); 3733 3734 /* Attach to an apparent O_PATH fd with a note that we need to unmount 3735 * it, not just simply put it. 3736 */ 3737 file = dentry_open(&newmount, O_PATH, fc->cred); 3738 if (IS_ERR(file)) { 3739 dissolve_on_fput(newmount.mnt); 3740 ret = PTR_ERR(file); 3741 goto err_path; 3742 } 3743 file->f_mode |= FMODE_NEED_UNMOUNT; 3744 3745 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0); 3746 if (ret >= 0) 3747 fd_install(ret, file); 3748 else 3749 fput(file); 3750 3751 err_path: 3752 path_put(&newmount); 3753 err_unlock: 3754 mutex_unlock(&fc->uapi_mutex); 3755 err_fsfd: 3756 fdput(f); 3757 return ret; 3758 } 3759 3760 /* 3761 * Move a mount from one place to another. In combination with 3762 * fsopen()/fsmount() this is used to install a new mount and in combination 3763 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy 3764 * a mount subtree. 3765 * 3766 * Note the flags value is a combination of MOVE_MOUNT_* flags. 3767 */ 3768 SYSCALL_DEFINE5(move_mount, 3769 int, from_dfd, const char __user *, from_pathname, 3770 int, to_dfd, const char __user *, to_pathname, 3771 unsigned int, flags) 3772 { 3773 struct path from_path, to_path; 3774 unsigned int lflags; 3775 int ret = 0; 3776 3777 if (!may_mount()) 3778 return -EPERM; 3779 3780 if (flags & ~MOVE_MOUNT__MASK) 3781 return -EINVAL; 3782 3783 /* If someone gives a pathname, they aren't permitted to move 3784 * from an fd that requires unmount as we can't get at the flag 3785 * to clear it afterwards. 3786 */ 3787 lflags = 0; 3788 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW; 3789 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 3790 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY; 3791 3792 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path); 3793 if (ret < 0) 3794 return ret; 3795 3796 lflags = 0; 3797 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW; 3798 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT; 3799 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY; 3800 3801 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path); 3802 if (ret < 0) 3803 goto out_from; 3804 3805 ret = security_move_mount(&from_path, &to_path); 3806 if (ret < 0) 3807 goto out_to; 3808 3809 if (flags & MOVE_MOUNT_SET_GROUP) 3810 ret = do_set_group(&from_path, &to_path); 3811 else 3812 ret = do_move_mount(&from_path, &to_path); 3813 3814 out_to: 3815 path_put(&to_path); 3816 out_from: 3817 path_put(&from_path); 3818 return ret; 3819 } 3820 3821 /* 3822 * Return true if path is reachable from root 3823 * 3824 * namespace_sem or mount_lock is held 3825 */ 3826 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, 3827 const struct path *root) 3828 { 3829 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { 3830 dentry = mnt->mnt_mountpoint; 3831 mnt = mnt->mnt_parent; 3832 } 3833 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); 3834 } 3835 3836 bool path_is_under(const struct path *path1, const struct path *path2) 3837 { 3838 bool res; 3839 read_seqlock_excl(&mount_lock); 3840 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); 3841 read_sequnlock_excl(&mount_lock); 3842 return res; 3843 } 3844 EXPORT_SYMBOL(path_is_under); 3845 3846 /* 3847 * pivot_root Semantics: 3848 * Moves the root file system of the current process to the directory put_old, 3849 * makes new_root as the new root file system of the current process, and sets 3850 * root/cwd of all processes which had them on the current root to new_root. 3851 * 3852 * Restrictions: 3853 * The new_root and put_old must be directories, and must not be on the 3854 * same file system as the current process root. The put_old must be 3855 * underneath new_root, i.e. adding a non-zero number of /.. to the string 3856 * pointed to by put_old must yield the same directory as new_root. No other 3857 * file system may be mounted on put_old. After all, new_root is a mountpoint. 3858 * 3859 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem. 3860 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives 3861 * in this situation. 3862 * 3863 * Notes: 3864 * - we don't move root/cwd if they are not at the root (reason: if something 3865 * cared enough to change them, it's probably wrong to force them elsewhere) 3866 * - it's okay to pick a root that isn't the root of a file system, e.g. 3867 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint, 3868 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root 3869 * first. 3870 */ 3871 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, 3872 const char __user *, put_old) 3873 { 3874 struct path new, old, root; 3875 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent; 3876 struct mountpoint *old_mp, *root_mp; 3877 int error; 3878 3879 if (!may_mount()) 3880 return -EPERM; 3881 3882 error = user_path_at(AT_FDCWD, new_root, 3883 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new); 3884 if (error) 3885 goto out0; 3886 3887 error = user_path_at(AT_FDCWD, put_old, 3888 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old); 3889 if (error) 3890 goto out1; 3891 3892 error = security_sb_pivotroot(&old, &new); 3893 if (error) 3894 goto out2; 3895 3896 get_fs_root(current->fs, &root); 3897 old_mp = lock_mount(&old); 3898 error = PTR_ERR(old_mp); 3899 if (IS_ERR(old_mp)) 3900 goto out3; 3901 3902 error = -EINVAL; 3903 new_mnt = real_mount(new.mnt); 3904 root_mnt = real_mount(root.mnt); 3905 old_mnt = real_mount(old.mnt); 3906 ex_parent = new_mnt->mnt_parent; 3907 root_parent = root_mnt->mnt_parent; 3908 if (IS_MNT_SHARED(old_mnt) || 3909 IS_MNT_SHARED(ex_parent) || 3910 IS_MNT_SHARED(root_parent)) 3911 goto out4; 3912 if (!check_mnt(root_mnt) || !check_mnt(new_mnt)) 3913 goto out4; 3914 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) 3915 goto out4; 3916 error = -ENOENT; 3917 if (d_unlinked(new.dentry)) 3918 goto out4; 3919 error = -EBUSY; 3920 if (new_mnt == root_mnt || old_mnt == root_mnt) 3921 goto out4; /* loop, on the same file system */ 3922 error = -EINVAL; 3923 if (root.mnt->mnt_root != root.dentry) 3924 goto out4; /* not a mountpoint */ 3925 if (!mnt_has_parent(root_mnt)) 3926 goto out4; /* not attached */ 3927 if (new.mnt->mnt_root != new.dentry) 3928 goto out4; /* not a mountpoint */ 3929 if (!mnt_has_parent(new_mnt)) 3930 goto out4; /* not attached */ 3931 /* make sure we can reach put_old from new_root */ 3932 if (!is_path_reachable(old_mnt, old.dentry, &new)) 3933 goto out4; 3934 /* make certain new is below the root */ 3935 if (!is_path_reachable(new_mnt, new.dentry, &root)) 3936 goto out4; 3937 lock_mount_hash(); 3938 umount_mnt(new_mnt); 3939 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */ 3940 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { 3941 new_mnt->mnt.mnt_flags |= MNT_LOCKED; 3942 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; 3943 } 3944 /* mount old root on put_old */ 3945 attach_mnt(root_mnt, old_mnt, old_mp); 3946 /* mount new_root on / */ 3947 attach_mnt(new_mnt, root_parent, root_mp); 3948 mnt_add_count(root_parent, -1); 3949 touch_mnt_namespace(current->nsproxy->mnt_ns); 3950 /* A moved mount should not expire automatically */ 3951 list_del_init(&new_mnt->mnt_expire); 3952 put_mountpoint(root_mp); 3953 unlock_mount_hash(); 3954 chroot_fs_refs(&root, &new); 3955 error = 0; 3956 out4: 3957 unlock_mount(old_mp); 3958 if (!error) 3959 mntput_no_expire(ex_parent); 3960 out3: 3961 path_put(&root); 3962 out2: 3963 path_put(&old); 3964 out1: 3965 path_put(&new); 3966 out0: 3967 return error; 3968 } 3969 3970 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt) 3971 { 3972 unsigned int flags = mnt->mnt.mnt_flags; 3973 3974 /* flags to clear */ 3975 flags &= ~kattr->attr_clr; 3976 /* flags to raise */ 3977 flags |= kattr->attr_set; 3978 3979 return flags; 3980 } 3981 3982 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 3983 { 3984 struct vfsmount *m = &mnt->mnt; 3985 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns; 3986 3987 if (!kattr->mnt_idmap) 3988 return 0; 3989 3990 /* 3991 * Creating an idmapped mount with the filesystem wide idmapping 3992 * doesn't make sense so block that. We don't allow mushy semantics. 3993 */ 3994 if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb)) 3995 return -EINVAL; 3996 3997 /* 3998 * Once a mount has been idmapped we don't allow it to change its 3999 * mapping. It makes things simpler and callers can just create 4000 * another bind-mount they can idmap if they want to. 4001 */ 4002 if (is_idmapped_mnt(m)) 4003 return -EPERM; 4004 4005 /* The underlying filesystem doesn't support idmapped mounts yet. */ 4006 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP)) 4007 return -EINVAL; 4008 4009 /* We're not controlling the superblock. */ 4010 if (!ns_capable(fs_userns, CAP_SYS_ADMIN)) 4011 return -EPERM; 4012 4013 /* Mount has already been visible in the filesystem hierarchy. */ 4014 if (!is_anon_ns(mnt->mnt_ns)) 4015 return -EINVAL; 4016 4017 return 0; 4018 } 4019 4020 /** 4021 * mnt_allow_writers() - check whether the attribute change allows writers 4022 * @kattr: the new mount attributes 4023 * @mnt: the mount to which @kattr will be applied 4024 * 4025 * Check whether thew new mount attributes in @kattr allow concurrent writers. 4026 * 4027 * Return: true if writers need to be held, false if not 4028 */ 4029 static inline bool mnt_allow_writers(const struct mount_kattr *kattr, 4030 const struct mount *mnt) 4031 { 4032 return (!(kattr->attr_set & MNT_READONLY) || 4033 (mnt->mnt.mnt_flags & MNT_READONLY)) && 4034 !kattr->mnt_idmap; 4035 } 4036 4037 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt) 4038 { 4039 struct mount *m; 4040 int err; 4041 4042 for (m = mnt; m; m = next_mnt(m, mnt)) { 4043 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) { 4044 err = -EPERM; 4045 break; 4046 } 4047 4048 err = can_idmap_mount(kattr, m); 4049 if (err) 4050 break; 4051 4052 if (!mnt_allow_writers(kattr, m)) { 4053 err = mnt_hold_writers(m); 4054 if (err) 4055 break; 4056 } 4057 4058 if (!kattr->recurse) 4059 return 0; 4060 } 4061 4062 if (err) { 4063 struct mount *p; 4064 4065 /* 4066 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will 4067 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all 4068 * mounts and needs to take care to include the first mount. 4069 */ 4070 for (p = mnt; p; p = next_mnt(p, mnt)) { 4071 /* If we had to hold writers unblock them. */ 4072 if (p->mnt.mnt_flags & MNT_WRITE_HOLD) 4073 mnt_unhold_writers(p); 4074 4075 /* 4076 * We're done once the first mount we changed got 4077 * MNT_WRITE_HOLD unset. 4078 */ 4079 if (p == m) 4080 break; 4081 } 4082 } 4083 return err; 4084 } 4085 4086 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt) 4087 { 4088 if (!kattr->mnt_idmap) 4089 return; 4090 4091 /* 4092 * Pairs with smp_load_acquire() in mnt_idmap(). 4093 * 4094 * Since we only allow a mount to change the idmapping once and 4095 * verified this in can_idmap_mount() we know that the mount has 4096 * @nop_mnt_idmap attached to it. So there's no need to drop any 4097 * references. 4098 */ 4099 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap)); 4100 } 4101 4102 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt) 4103 { 4104 struct mount *m; 4105 4106 for (m = mnt; m; m = next_mnt(m, mnt)) { 4107 unsigned int flags; 4108 4109 do_idmap_mount(kattr, m); 4110 flags = recalc_flags(kattr, m); 4111 WRITE_ONCE(m->mnt.mnt_flags, flags); 4112 4113 /* If we had to hold writers unblock them. */ 4114 if (m->mnt.mnt_flags & MNT_WRITE_HOLD) 4115 mnt_unhold_writers(m); 4116 4117 if (kattr->propagation) 4118 change_mnt_propagation(m, kattr->propagation); 4119 if (!kattr->recurse) 4120 break; 4121 } 4122 touch_mnt_namespace(mnt->mnt_ns); 4123 } 4124 4125 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr) 4126 { 4127 struct mount *mnt = real_mount(path->mnt); 4128 int err = 0; 4129 4130 if (path->dentry != mnt->mnt.mnt_root) 4131 return -EINVAL; 4132 4133 if (kattr->mnt_userns) { 4134 struct mnt_idmap *mnt_idmap; 4135 4136 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns); 4137 if (IS_ERR(mnt_idmap)) 4138 return PTR_ERR(mnt_idmap); 4139 kattr->mnt_idmap = mnt_idmap; 4140 } 4141 4142 if (kattr->propagation) { 4143 /* 4144 * Only take namespace_lock() if we're actually changing 4145 * propagation. 4146 */ 4147 namespace_lock(); 4148 if (kattr->propagation == MS_SHARED) { 4149 err = invent_group_ids(mnt, kattr->recurse); 4150 if (err) { 4151 namespace_unlock(); 4152 return err; 4153 } 4154 } 4155 } 4156 4157 err = -EINVAL; 4158 lock_mount_hash(); 4159 4160 /* Ensure that this isn't anything purely vfs internal. */ 4161 if (!is_mounted(&mnt->mnt)) 4162 goto out; 4163 4164 /* 4165 * If this is an attached mount make sure it's located in the callers 4166 * mount namespace. If it's not don't let the caller interact with it. 4167 * If this is a detached mount make sure it has an anonymous mount 4168 * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE. 4169 */ 4170 if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns))) 4171 goto out; 4172 4173 /* 4174 * First, we get the mount tree in a shape where we can change mount 4175 * properties without failure. If we succeeded to do so we commit all 4176 * changes and if we failed we clean up. 4177 */ 4178 err = mount_setattr_prepare(kattr, mnt); 4179 if (!err) 4180 mount_setattr_commit(kattr, mnt); 4181 4182 out: 4183 unlock_mount_hash(); 4184 4185 if (kattr->propagation) { 4186 if (err) 4187 cleanup_group_ids(mnt, NULL); 4188 namespace_unlock(); 4189 } 4190 4191 return err; 4192 } 4193 4194 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize, 4195 struct mount_kattr *kattr, unsigned int flags) 4196 { 4197 int err = 0; 4198 struct ns_common *ns; 4199 struct user_namespace *mnt_userns; 4200 struct file *file; 4201 4202 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP)) 4203 return 0; 4204 4205 /* 4206 * We currently do not support clearing an idmapped mount. If this ever 4207 * is a use-case we can revisit this but for now let's keep it simple 4208 * and not allow it. 4209 */ 4210 if (attr->attr_clr & MOUNT_ATTR_IDMAP) 4211 return -EINVAL; 4212 4213 if (attr->userns_fd > INT_MAX) 4214 return -EINVAL; 4215 4216 file = fget(attr->userns_fd); 4217 if (!file) 4218 return -EBADF; 4219 4220 if (!proc_ns_file(file)) { 4221 err = -EINVAL; 4222 goto out_fput; 4223 } 4224 4225 ns = get_proc_ns(file_inode(file)); 4226 if (ns->ops->type != CLONE_NEWUSER) { 4227 err = -EINVAL; 4228 goto out_fput; 4229 } 4230 4231 /* 4232 * The initial idmapping cannot be used to create an idmapped 4233 * mount. We use the initial idmapping as an indicator of a mount 4234 * that is not idmapped. It can simply be passed into helpers that 4235 * are aware of idmapped mounts as a convenient shortcut. A user 4236 * can just create a dedicated identity mapping to achieve the same 4237 * result. 4238 */ 4239 mnt_userns = container_of(ns, struct user_namespace, ns); 4240 if (mnt_userns == &init_user_ns) { 4241 err = -EPERM; 4242 goto out_fput; 4243 } 4244 4245 /* We're not controlling the target namespace. */ 4246 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) { 4247 err = -EPERM; 4248 goto out_fput; 4249 } 4250 4251 kattr->mnt_userns = get_user_ns(mnt_userns); 4252 4253 out_fput: 4254 fput(file); 4255 return err; 4256 } 4257 4258 static int build_mount_kattr(const struct mount_attr *attr, size_t usize, 4259 struct mount_kattr *kattr, unsigned int flags) 4260 { 4261 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; 4262 4263 if (flags & AT_NO_AUTOMOUNT) 4264 lookup_flags &= ~LOOKUP_AUTOMOUNT; 4265 if (flags & AT_SYMLINK_NOFOLLOW) 4266 lookup_flags &= ~LOOKUP_FOLLOW; 4267 if (flags & AT_EMPTY_PATH) 4268 lookup_flags |= LOOKUP_EMPTY; 4269 4270 *kattr = (struct mount_kattr) { 4271 .lookup_flags = lookup_flags, 4272 .recurse = !!(flags & AT_RECURSIVE), 4273 }; 4274 4275 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS) 4276 return -EINVAL; 4277 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1) 4278 return -EINVAL; 4279 kattr->propagation = attr->propagation; 4280 4281 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS) 4282 return -EINVAL; 4283 4284 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set); 4285 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr); 4286 4287 /* 4288 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap, 4289 * users wanting to transition to a different atime setting cannot 4290 * simply specify the atime setting in @attr_set, but must also 4291 * specify MOUNT_ATTR__ATIME in the @attr_clr field. 4292 * So ensure that MOUNT_ATTR__ATIME can't be partially set in 4293 * @attr_clr and that @attr_set can't have any atime bits set if 4294 * MOUNT_ATTR__ATIME isn't set in @attr_clr. 4295 */ 4296 if (attr->attr_clr & MOUNT_ATTR__ATIME) { 4297 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME) 4298 return -EINVAL; 4299 4300 /* 4301 * Clear all previous time settings as they are mutually 4302 * exclusive. 4303 */ 4304 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME; 4305 switch (attr->attr_set & MOUNT_ATTR__ATIME) { 4306 case MOUNT_ATTR_RELATIME: 4307 kattr->attr_set |= MNT_RELATIME; 4308 break; 4309 case MOUNT_ATTR_NOATIME: 4310 kattr->attr_set |= MNT_NOATIME; 4311 break; 4312 case MOUNT_ATTR_STRICTATIME: 4313 break; 4314 default: 4315 return -EINVAL; 4316 } 4317 } else { 4318 if (attr->attr_set & MOUNT_ATTR__ATIME) 4319 return -EINVAL; 4320 } 4321 4322 return build_mount_idmapped(attr, usize, kattr, flags); 4323 } 4324 4325 static void finish_mount_kattr(struct mount_kattr *kattr) 4326 { 4327 put_user_ns(kattr->mnt_userns); 4328 kattr->mnt_userns = NULL; 4329 4330 if (kattr->mnt_idmap) 4331 mnt_idmap_put(kattr->mnt_idmap); 4332 } 4333 4334 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path, 4335 unsigned int, flags, struct mount_attr __user *, uattr, 4336 size_t, usize) 4337 { 4338 int err; 4339 struct path target; 4340 struct mount_attr attr; 4341 struct mount_kattr kattr; 4342 4343 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0); 4344 4345 if (flags & ~(AT_EMPTY_PATH | 4346 AT_RECURSIVE | 4347 AT_SYMLINK_NOFOLLOW | 4348 AT_NO_AUTOMOUNT)) 4349 return -EINVAL; 4350 4351 if (unlikely(usize > PAGE_SIZE)) 4352 return -E2BIG; 4353 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0)) 4354 return -EINVAL; 4355 4356 if (!may_mount()) 4357 return -EPERM; 4358 4359 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize); 4360 if (err) 4361 return err; 4362 4363 /* Don't bother walking through the mounts if this is a nop. */ 4364 if (attr.attr_set == 0 && 4365 attr.attr_clr == 0 && 4366 attr.propagation == 0) 4367 return 0; 4368 4369 err = build_mount_kattr(&attr, usize, &kattr, flags); 4370 if (err) 4371 return err; 4372 4373 err = user_path_at(dfd, path, kattr.lookup_flags, &target); 4374 if (!err) { 4375 err = do_mount_setattr(&target, &kattr); 4376 path_put(&target); 4377 } 4378 finish_mount_kattr(&kattr); 4379 return err; 4380 } 4381 4382 static void __init init_mount_tree(void) 4383 { 4384 struct vfsmount *mnt; 4385 struct mount *m; 4386 struct mnt_namespace *ns; 4387 struct path root; 4388 4389 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL); 4390 if (IS_ERR(mnt)) 4391 panic("Can't create rootfs"); 4392 4393 ns = alloc_mnt_ns(&init_user_ns, false); 4394 if (IS_ERR(ns)) 4395 panic("Can't allocate initial namespace"); 4396 m = real_mount(mnt); 4397 m->mnt_ns = ns; 4398 ns->root = m; 4399 ns->mounts = 1; 4400 list_add(&m->mnt_list, &ns->list); 4401 init_task.nsproxy->mnt_ns = ns; 4402 get_mnt_ns(ns); 4403 4404 root.mnt = mnt; 4405 root.dentry = mnt->mnt_root; 4406 mnt->mnt_flags |= MNT_LOCKED; 4407 4408 set_fs_pwd(current->fs, &root); 4409 set_fs_root(current->fs, &root); 4410 } 4411 4412 void __init mnt_init(void) 4413 { 4414 int err; 4415 4416 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 4417 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); 4418 4419 mount_hashtable = alloc_large_system_hash("Mount-cache", 4420 sizeof(struct hlist_head), 4421 mhash_entries, 19, 4422 HASH_ZERO, 4423 &m_hash_shift, &m_hash_mask, 0, 0); 4424 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", 4425 sizeof(struct hlist_head), 4426 mphash_entries, 19, 4427 HASH_ZERO, 4428 &mp_hash_shift, &mp_hash_mask, 0, 0); 4429 4430 if (!mount_hashtable || !mountpoint_hashtable) 4431 panic("Failed to allocate mount hash table\n"); 4432 4433 kernfs_init(); 4434 4435 err = sysfs_init(); 4436 if (err) 4437 printk(KERN_WARNING "%s: sysfs_init error: %d\n", 4438 __func__, err); 4439 fs_kobj = kobject_create_and_add("fs", NULL); 4440 if (!fs_kobj) 4441 printk(KERN_WARNING "%s: kobj create error\n", __func__); 4442 shmem_init(); 4443 init_rootfs(); 4444 init_mount_tree(); 4445 } 4446 4447 void put_mnt_ns(struct mnt_namespace *ns) 4448 { 4449 if (!refcount_dec_and_test(&ns->ns.count)) 4450 return; 4451 drop_collected_mounts(&ns->root->mnt); 4452 free_mnt_ns(ns); 4453 } 4454 4455 struct vfsmount *kern_mount(struct file_system_type *type) 4456 { 4457 struct vfsmount *mnt; 4458 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL); 4459 if (!IS_ERR(mnt)) { 4460 /* 4461 * it is a longterm mount, don't release mnt until 4462 * we unmount before file sys is unregistered 4463 */ 4464 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; 4465 } 4466 return mnt; 4467 } 4468 EXPORT_SYMBOL_GPL(kern_mount); 4469 4470 void kern_unmount(struct vfsmount *mnt) 4471 { 4472 /* release long term mount so mount point can be released */ 4473 if (!IS_ERR(mnt)) { 4474 mnt_make_shortterm(mnt); 4475 synchronize_rcu(); /* yecchhh... */ 4476 mntput(mnt); 4477 } 4478 } 4479 EXPORT_SYMBOL(kern_unmount); 4480 4481 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num) 4482 { 4483 unsigned int i; 4484 4485 for (i = 0; i < num; i++) 4486 mnt_make_shortterm(mnt[i]); 4487 synchronize_rcu_expedited(); 4488 for (i = 0; i < num; i++) 4489 mntput(mnt[i]); 4490 } 4491 EXPORT_SYMBOL(kern_unmount_array); 4492 4493 bool our_mnt(struct vfsmount *mnt) 4494 { 4495 return check_mnt(real_mount(mnt)); 4496 } 4497 4498 bool current_chrooted(void) 4499 { 4500 /* Does the current process have a non-standard root */ 4501 struct path ns_root; 4502 struct path fs_root; 4503 bool chrooted; 4504 4505 /* Find the namespace root */ 4506 ns_root.mnt = ¤t->nsproxy->mnt_ns->root->mnt; 4507 ns_root.dentry = ns_root.mnt->mnt_root; 4508 path_get(&ns_root); 4509 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) 4510 ; 4511 4512 get_fs_root(current->fs, &fs_root); 4513 4514 chrooted = !path_equal(&fs_root, &ns_root); 4515 4516 path_put(&fs_root); 4517 path_put(&ns_root); 4518 4519 return chrooted; 4520 } 4521 4522 static bool mnt_already_visible(struct mnt_namespace *ns, 4523 const struct super_block *sb, 4524 int *new_mnt_flags) 4525 { 4526 int new_flags = *new_mnt_flags; 4527 struct mount *mnt; 4528 bool visible = false; 4529 4530 down_read(&namespace_sem); 4531 lock_ns_list(ns); 4532 list_for_each_entry(mnt, &ns->list, mnt_list) { 4533 struct mount *child; 4534 int mnt_flags; 4535 4536 if (mnt_is_cursor(mnt)) 4537 continue; 4538 4539 if (mnt->mnt.mnt_sb->s_type != sb->s_type) 4540 continue; 4541 4542 /* This mount is not fully visible if it's root directory 4543 * is not the root directory of the filesystem. 4544 */ 4545 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) 4546 continue; 4547 4548 /* A local view of the mount flags */ 4549 mnt_flags = mnt->mnt.mnt_flags; 4550 4551 /* Don't miss readonly hidden in the superblock flags */ 4552 if (sb_rdonly(mnt->mnt.mnt_sb)) 4553 mnt_flags |= MNT_LOCK_READONLY; 4554 4555 /* Verify the mount flags are equal to or more permissive 4556 * than the proposed new mount. 4557 */ 4558 if ((mnt_flags & MNT_LOCK_READONLY) && 4559 !(new_flags & MNT_READONLY)) 4560 continue; 4561 if ((mnt_flags & MNT_LOCK_ATIME) && 4562 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK))) 4563 continue; 4564 4565 /* This mount is not fully visible if there are any 4566 * locked child mounts that cover anything except for 4567 * empty directories. 4568 */ 4569 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { 4570 struct inode *inode = child->mnt_mountpoint->d_inode; 4571 /* Only worry about locked mounts */ 4572 if (!(child->mnt.mnt_flags & MNT_LOCKED)) 4573 continue; 4574 /* Is the directory permanetly empty? */ 4575 if (!is_empty_dir_inode(inode)) 4576 goto next; 4577 } 4578 /* Preserve the locked attributes */ 4579 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \ 4580 MNT_LOCK_ATIME); 4581 visible = true; 4582 goto found; 4583 next: ; 4584 } 4585 found: 4586 unlock_ns_list(ns); 4587 up_read(&namespace_sem); 4588 return visible; 4589 } 4590 4591 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags) 4592 { 4593 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV; 4594 struct mnt_namespace *ns = current->nsproxy->mnt_ns; 4595 unsigned long s_iflags; 4596 4597 if (ns->user_ns == &init_user_ns) 4598 return false; 4599 4600 /* Can this filesystem be too revealing? */ 4601 s_iflags = sb->s_iflags; 4602 if (!(s_iflags & SB_I_USERNS_VISIBLE)) 4603 return false; 4604 4605 if ((s_iflags & required_iflags) != required_iflags) { 4606 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n", 4607 required_iflags); 4608 return true; 4609 } 4610 4611 return !mnt_already_visible(ns, sb, new_mnt_flags); 4612 } 4613 4614 bool mnt_may_suid(struct vfsmount *mnt) 4615 { 4616 /* 4617 * Foreign mounts (accessed via fchdir or through /proc 4618 * symlinks) are always treated as if they are nosuid. This 4619 * prevents namespaces from trusting potentially unsafe 4620 * suid/sgid bits, file caps, or security labels that originate 4621 * in other namespaces. 4622 */ 4623 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) && 4624 current_in_userns(mnt->mnt_sb->s_user_ns); 4625 } 4626 4627 static struct ns_common *mntns_get(struct task_struct *task) 4628 { 4629 struct ns_common *ns = NULL; 4630 struct nsproxy *nsproxy; 4631 4632 task_lock(task); 4633 nsproxy = task->nsproxy; 4634 if (nsproxy) { 4635 ns = &nsproxy->mnt_ns->ns; 4636 get_mnt_ns(to_mnt_ns(ns)); 4637 } 4638 task_unlock(task); 4639 4640 return ns; 4641 } 4642 4643 static void mntns_put(struct ns_common *ns) 4644 { 4645 put_mnt_ns(to_mnt_ns(ns)); 4646 } 4647 4648 static int mntns_install(struct nsset *nsset, struct ns_common *ns) 4649 { 4650 struct nsproxy *nsproxy = nsset->nsproxy; 4651 struct fs_struct *fs = nsset->fs; 4652 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns; 4653 struct user_namespace *user_ns = nsset->cred->user_ns; 4654 struct path root; 4655 int err; 4656 4657 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || 4658 !ns_capable(user_ns, CAP_SYS_CHROOT) || 4659 !ns_capable(user_ns, CAP_SYS_ADMIN)) 4660 return -EPERM; 4661 4662 if (is_anon_ns(mnt_ns)) 4663 return -EINVAL; 4664 4665 if (fs->users != 1) 4666 return -EINVAL; 4667 4668 get_mnt_ns(mnt_ns); 4669 old_mnt_ns = nsproxy->mnt_ns; 4670 nsproxy->mnt_ns = mnt_ns; 4671 4672 /* Find the root */ 4673 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt, 4674 "/", LOOKUP_DOWN, &root); 4675 if (err) { 4676 /* revert to old namespace */ 4677 nsproxy->mnt_ns = old_mnt_ns; 4678 put_mnt_ns(mnt_ns); 4679 return err; 4680 } 4681 4682 put_mnt_ns(old_mnt_ns); 4683 4684 /* Update the pwd and root */ 4685 set_fs_pwd(fs, &root); 4686 set_fs_root(fs, &root); 4687 4688 path_put(&root); 4689 return 0; 4690 } 4691 4692 static struct user_namespace *mntns_owner(struct ns_common *ns) 4693 { 4694 return to_mnt_ns(ns)->user_ns; 4695 } 4696 4697 const struct proc_ns_operations mntns_operations = { 4698 .name = "mnt", 4699 .type = CLONE_NEWNS, 4700 .get = mntns_get, 4701 .put = mntns_put, 4702 .install = mntns_install, 4703 .owner = mntns_owner, 4704 }; 4705 4706 #ifdef CONFIG_SYSCTL 4707 static struct ctl_table fs_namespace_sysctls[] = { 4708 { 4709 .procname = "mount-max", 4710 .data = &sysctl_mount_max, 4711 .maxlen = sizeof(unsigned int), 4712 .mode = 0644, 4713 .proc_handler = proc_dointvec_minmax, 4714 .extra1 = SYSCTL_ONE, 4715 }, 4716 { } 4717 }; 4718 4719 static int __init init_fs_namespace_sysctls(void) 4720 { 4721 register_sysctl_init("fs", fs_namespace_sysctls); 4722 return 0; 4723 } 4724 fs_initcall(init_fs_namespace_sysctls); 4725 4726 #endif /* CONFIG_SYSCTL */ 4727