xref: /openbmc/linux/fs/namespace.c (revision 035c6e60074f7b5dccc90bfb64816bc82cde7239)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/namespace.c
4  *
5  * (C) Copyright Al Viro 2000, 2001
6  *
7  * Based on code from fs/super.c, copyright Linus Torvalds and others.
8  * Heavily rewritten.
9  */
10 
11 #include <linux/syscalls.h>
12 #include <linux/export.h>
13 #include <linux/capability.h>
14 #include <linux/mnt_namespace.h>
15 #include <linux/user_namespace.h>
16 #include <linux/namei.h>
17 #include <linux/security.h>
18 #include <linux/cred.h>
19 #include <linux/idr.h>
20 #include <linux/init.h>		/* init_rootfs */
21 #include <linux/fs_struct.h>	/* get_fs_root et al. */
22 #include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
23 #include <linux/file.h>
24 #include <linux/uaccess.h>
25 #include <linux/proc_ns.h>
26 #include <linux/magic.h>
27 #include <linux/memblock.h>
28 #include <linux/proc_fs.h>
29 #include <linux/task_work.h>
30 #include <linux/sched/task.h>
31 #include <uapi/linux/mount.h>
32 #include <linux/fs_context.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/mnt_idmapping.h>
35 
36 #include "pnode.h"
37 #include "internal.h"
38 
39 /* Maximum number of mounts in a mount namespace */
40 static unsigned int sysctl_mount_max __read_mostly = 100000;
41 
42 static unsigned int m_hash_mask __read_mostly;
43 static unsigned int m_hash_shift __read_mostly;
44 static unsigned int mp_hash_mask __read_mostly;
45 static unsigned int mp_hash_shift __read_mostly;
46 
47 static __initdata unsigned long mhash_entries;
48 static int __init set_mhash_entries(char *str)
49 {
50 	if (!str)
51 		return 0;
52 	mhash_entries = simple_strtoul(str, &str, 0);
53 	return 1;
54 }
55 __setup("mhash_entries=", set_mhash_entries);
56 
57 static __initdata unsigned long mphash_entries;
58 static int __init set_mphash_entries(char *str)
59 {
60 	if (!str)
61 		return 0;
62 	mphash_entries = simple_strtoul(str, &str, 0);
63 	return 1;
64 }
65 __setup("mphash_entries=", set_mphash_entries);
66 
67 static u64 event;
68 static DEFINE_IDA(mnt_id_ida);
69 static DEFINE_IDA(mnt_group_ida);
70 
71 static struct hlist_head *mount_hashtable __read_mostly;
72 static struct hlist_head *mountpoint_hashtable __read_mostly;
73 static struct kmem_cache *mnt_cache __read_mostly;
74 static DECLARE_RWSEM(namespace_sem);
75 static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
76 static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
77 
78 struct mount_kattr {
79 	unsigned int attr_set;
80 	unsigned int attr_clr;
81 	unsigned int propagation;
82 	unsigned int lookup_flags;
83 	bool recurse;
84 	struct user_namespace *mnt_userns;
85 	struct mnt_idmap *mnt_idmap;
86 };
87 
88 /* /sys/fs */
89 struct kobject *fs_kobj;
90 EXPORT_SYMBOL_GPL(fs_kobj);
91 
92 /*
93  * vfsmount lock may be taken for read to prevent changes to the
94  * vfsmount hash, i.e. during mountpoint lookups or walking back
95  * up the tree.
96  *
97  * It should be taken for write in all cases where the vfsmount
98  * tree or hash is modified or when a vfsmount structure is modified.
99  */
100 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
101 
102 static inline void lock_mount_hash(void)
103 {
104 	write_seqlock(&mount_lock);
105 }
106 
107 static inline void unlock_mount_hash(void)
108 {
109 	write_sequnlock(&mount_lock);
110 }
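
/*
 * Read-side sketch (illustrative): lockless readers sample mount_lock's
 * seqcount, do their lookup, and retry if a writer intervened; see
 * lookup_mnt() below for the canonical pattern:
 *
 *	do {
 *		seq = read_seqbegin(&mount_lock);
 *		...
 *	} while (read_seqretry(&mount_lock, seq));
 */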
111 
112 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
113 {
114 	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
115 	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
116 	tmp = tmp + (tmp >> m_hash_shift);
117 	return &mount_hashtable[tmp & m_hash_mask];
118 }
119 
120 static inline struct hlist_head *mp_hash(struct dentry *dentry)
121 {
122 	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
123 	tmp = tmp + (tmp >> mp_hash_shift);
124 	return &mountpoint_hashtable[tmp & mp_hash_mask];
125 }
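
/*
 * Worked sketch of the hashing above (illustrative numbers): with a
 * 1024-bucket table, m_hash_mask is 0x3ff and m_hash_shift is 10. Both
 * pointers are divided by L1_CACHE_BYTES to discard always-zero low
 * bits, summed, folded once by the shift, and masked to a bucket index.
 * The real mask/shift values are computed at boot when the tables are
 * allocated.
 */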
126 
127 static int mnt_alloc_id(struct mount *mnt)
128 {
129 	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
130 
131 	if (res < 0)
132 		return res;
133 	mnt->mnt_id = res;
134 	return 0;
135 }
136 
137 static void mnt_free_id(struct mount *mnt)
138 {
139 	ida_free(&mnt_id_ida, mnt->mnt_id);
140 }
141 
142 /*
143  * Allocate a new peer group ID
144  */
145 static int mnt_alloc_group_id(struct mount *mnt)
146 {
147 	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
148 
149 	if (res < 0)
150 		return res;
151 	mnt->mnt_group_id = res;
152 	return 0;
153 }
154 
155 /*
156  * Release a peer group ID
157  */
158 void mnt_release_group_id(struct mount *mnt)
159 {
160 	ida_free(&mnt_group_ida, mnt->mnt_group_id);
161 	mnt->mnt_group_id = 0;
162 }
163 
164 /*
165  * vfsmount lock must be held for read
166  */
167 static inline void mnt_add_count(struct mount *mnt, int n)
168 {
169 #ifdef CONFIG_SMP
170 	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
171 #else
172 	preempt_disable();
173 	mnt->mnt_count += n;
174 	preempt_enable();
175 #endif
176 }
177 
178 /*
179  * vfsmount lock must be held for write
180  */
181 int mnt_get_count(struct mount *mnt)
182 {
183 #ifdef CONFIG_SMP
184 	int count = 0;
185 	int cpu;
186 
187 	for_each_possible_cpu(cpu) {
188 		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
189 	}
190 
191 	return count;
192 #else
193 	return mnt->mnt_count;
194 #endif
195 }
196 
197 static struct mount *alloc_vfsmnt(const char *name)
198 {
199 	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
200 	if (mnt) {
201 		int err;
202 
203 		err = mnt_alloc_id(mnt);
204 		if (err)
205 			goto out_free_cache;
206 
207 		if (name) {
208 			mnt->mnt_devname = kstrdup_const(name,
209 							 GFP_KERNEL_ACCOUNT);
210 			if (!mnt->mnt_devname)
211 				goto out_free_id;
212 		}
213 
214 #ifdef CONFIG_SMP
215 		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
216 		if (!mnt->mnt_pcp)
217 			goto out_free_devname;
218 
219 		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
220 #else
221 		mnt->mnt_count = 1;
222 		mnt->mnt_writers = 0;
223 #endif
224 
225 		INIT_HLIST_NODE(&mnt->mnt_hash);
226 		INIT_LIST_HEAD(&mnt->mnt_child);
227 		INIT_LIST_HEAD(&mnt->mnt_mounts);
228 		INIT_LIST_HEAD(&mnt->mnt_list);
229 		INIT_LIST_HEAD(&mnt->mnt_expire);
230 		INIT_LIST_HEAD(&mnt->mnt_share);
231 		INIT_LIST_HEAD(&mnt->mnt_slave_list);
232 		INIT_LIST_HEAD(&mnt->mnt_slave);
233 		INIT_HLIST_NODE(&mnt->mnt_mp_list);
234 		INIT_LIST_HEAD(&mnt->mnt_umounting);
235 		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
236 		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
237 	}
238 	return mnt;
239 
240 #ifdef CONFIG_SMP
241 out_free_devname:
242 	kfree_const(mnt->mnt_devname);
243 #endif
244 out_free_id:
245 	mnt_free_id(mnt);
246 out_free_cache:
247 	kmem_cache_free(mnt_cache, mnt);
248 	return NULL;
249 }
250 
251 /*
252  * Most r/o checks on a fs are for operations that take
253  * discrete amounts of time, like a write() or unlink().
254  * We must keep track of when those operations start
255  * (for permission checks) and when they end, so that
256  * we can determine when writes are able to occur to
257  * a filesystem.
258  */
259 /*
260  * __mnt_is_readonly: check whether a mount is read-only
261  * @mnt: the mount to check for its write status
262  *
263  * This shouldn't be used directly outside of the VFS.
264  * It does not guarantee that the filesystem will stay
265  * r/w, just that it is r/w right *now*.  This cannot and
266  * should not be used in place of IS_RDONLY(inode).
267  * mnt_want/drop_write() will _keep_ the filesystem
268  * r/w.
269  */
270 bool __mnt_is_readonly(struct vfsmount *mnt)
271 {
272 	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
273 }
274 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
275 
276 static inline void mnt_inc_writers(struct mount *mnt)
277 {
278 #ifdef CONFIG_SMP
279 	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
280 #else
281 	mnt->mnt_writers++;
282 #endif
283 }
284 
285 static inline void mnt_dec_writers(struct mount *mnt)
286 {
287 #ifdef CONFIG_SMP
288 	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
289 #else
290 	mnt->mnt_writers--;
291 #endif
292 }
293 
294 static unsigned int mnt_get_writers(struct mount *mnt)
295 {
296 #ifdef CONFIG_SMP
297 	unsigned int count = 0;
298 	int cpu;
299 
300 	for_each_possible_cpu(cpu) {
301 		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
302 	}
303 
304 	return count;
305 #else
306 	return mnt->mnt_writers;
307 #endif
308 }
309 
310 static int mnt_is_readonly(struct vfsmount *mnt)
311 {
312 	if (READ_ONCE(mnt->mnt_sb->s_readonly_remount))
313 		return 1;
314 	/*
315 	 * The barrier pairs with the barrier in sb_start_ro_state_change()
316 	 * making sure if we don't see s_readonly_remount set yet, we also will
317 	 * not see any superblock / mount flag changes done by remount.
318 	 * It also pairs with the barrier in sb_end_ro_state_change()
319 	 * assuring that if we see s_readonly_remount already cleared, we will
320 	 * see the values of superblock / mount flags updated by remount.
321 	 */
322 	smp_rmb();
323 	return __mnt_is_readonly(mnt);
324 }
325 
326 /*
327  * Most r/o & frozen checks on a fs are for operations that take discrete
328  * amounts of time, like a write() or unlink().  We must keep track of when
329  * those operations start (for permission checks) and when they end, so that we
330  * can determine when writes are able to occur to a filesystem.
331  */
332 /**
333  * __mnt_want_write - get write access to a mount without freeze protection
334  * @m: the mount on which to take a write
335  *
336  * This tells the low-level filesystem that a write is about to be performed to
337  * it, and makes sure that writes are allowed (the mount is read-write) before
338  * returning success. This operation does not protect against the filesystem
339  * being frozen. When the write operation is finished, __mnt_drop_write() must be
340  * called. This is effectively a refcount.
341  */
342 int __mnt_want_write(struct vfsmount *m)
343 {
344 	struct mount *mnt = real_mount(m);
345 	int ret = 0;
346 
347 	preempt_disable();
348 	mnt_inc_writers(mnt);
349 	/*
350 	 * The store from mnt_inc_writers() must be visible before we enter the
351 	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
352 	 * incremented count after it has set MNT_WRITE_HOLD.
353 	 */
354 	smp_mb();
355 	might_lock(&mount_lock.lock);
356 	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
357 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
358 			cpu_relax();
359 		} else {
360 			/*
361 			 * This prevents priority inversion if the task
362 			 * setting MNT_WRITE_HOLD got preempted on a remote
363 			 * CPU, and it prevents livelock if the task setting
364 			 * MNT_WRITE_HOLD has a lower priority and is bound to
365 			 * the same CPU as the task that is spinning here.
366 			 */
367 			preempt_enable();
368 			lock_mount_hash();
369 			unlock_mount_hash();
370 			preempt_disable();
371 		}
372 	}
373 	/*
374 	 * The barrier pairs with the barrier sb_start_ro_state_change() making
375 	 * sure that if we see MNT_WRITE_HOLD cleared, we will also see
376 	 * s_readonly_remount set (or even SB_RDONLY / MNT_READONLY flags) in
377 	 * mnt_is_readonly() and bail in case we are racing with remount
378 	 * read-only.
379 	 */
380 	smp_rmb();
381 	if (mnt_is_readonly(m)) {
382 		mnt_dec_writers(mnt);
383 		ret = -EROFS;
384 	}
385 	preempt_enable();
386 
387 	return ret;
388 }
389 
390 /**
391  * mnt_want_write - get write access to a mount
392  * @m: the mount on which to take a write
393  *
394  * This tells the low-level filesystem that a write is about to be performed to
395  * it, and makes sure that writes are allowed (mount is read-write, filesystem
396  * is not frozen) before returning success.  When the write operation is
397  * finished, mnt_drop_write() must be called.  This is effectively a refcount.
398  */
399 int mnt_want_write(struct vfsmount *m)
400 {
401 	int ret;
402 
403 	sb_start_write(m->mnt_sb);
404 	ret = __mnt_want_write(m);
405 	if (ret)
406 		sb_end_write(m->mnt_sb);
407 	return ret;
408 }
409 EXPORT_SYMBOL_GPL(mnt_want_write);
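
/*
 * Usage sketch (illustrative only; the helper below is hypothetical and
 * not part of this file): callers bracket a modification with
 * mnt_want_write()/mnt_drop_write(), as the vfs_*() helpers do.
 */
static int __maybe_unused example_modify_mount(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* freeze + r/o protection */
	if (err)
		return err;
	/* ... perform the actual write ... */
	mnt_drop_write(path->mnt);		/* pairs with mnt_want_write() */
	return 0;
}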
410 
411 /**
412  * __mnt_want_write_file - get write access to a file's mount
413  * @file: the file whose mount to take a write on
414  *
415  * This is like __mnt_want_write, but if the file is already open for writing it
416  * skips incrementing mnt_writers (since the open file already has a reference)
417  * and instead only does the check for emergency r/o remounts.  This must be
418  * paired with __mnt_drop_write_file.
419  */
420 int __mnt_want_write_file(struct file *file)
421 {
422 	if (file->f_mode & FMODE_WRITER) {
423 		/*
424 		 * Superblock may have become readonly while there are still
425 		 * writable fd's, e.g. due to a fs error with errors=remount-ro
426 		 */
427 		if (__mnt_is_readonly(file->f_path.mnt))
428 			return -EROFS;
429 		return 0;
430 	}
431 	return __mnt_want_write(file->f_path.mnt);
432 }
433 
434 /**
435  * mnt_want_write_file - get write access to a file's mount
436  * @file: the file whose mount to take a write on
437  *
438  * This is like mnt_want_write, but if the file is already open for writing it
439  * skips incrementing mnt_writers (since the open file already has a reference)
440  * and instead only does the freeze protection and the check for emergency r/o
441  * remounts.  This must be paired with mnt_drop_write_file.
442  */
443 int mnt_want_write_file(struct file *file)
444 {
445 	int ret;
446 
447 	sb_start_write(file_inode(file)->i_sb);
448 	ret = __mnt_want_write_file(file);
449 	if (ret)
450 		sb_end_write(file_inode(file)->i_sb);
451 	return ret;
452 }
453 EXPORT_SYMBOL_GPL(mnt_want_write_file);
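
/*
 * Usage sketch (illustrative; the helper is hypothetical): handlers
 * that modify an inode through an already-open file pair
 * mnt_want_write_file() with mnt_drop_write_file().
 */
static int __maybe_unused example_modify_via_file(struct file *file)
{
	int err = mnt_want_write_file(file);

	if (err)
		return err;
	/* ... update the inode backing @file ... */
	mnt_drop_write_file(file);
	return 0;
}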
454 
455 /**
456  * __mnt_drop_write - give up write access to a mount
457  * @mnt: the mount on which to give up write access
458  *
459  * Tells the low-level filesystem that we are done
460  * performing writes to it.  Must be matched with
461  * __mnt_want_write() call above.
462  */
463 void __mnt_drop_write(struct vfsmount *mnt)
464 {
465 	preempt_disable();
466 	mnt_dec_writers(real_mount(mnt));
467 	preempt_enable();
468 }
469 
470 /**
471  * mnt_drop_write - give up write access to a mount
472  * @mnt: the mount on which to give up write access
473  *
474  * Tells the low-level filesystem that we are done performing writes to it and
475  * also allows filesystem to be frozen again.  Must be matched with
476  * mnt_want_write() call above.
477  */
478 void mnt_drop_write(struct vfsmount *mnt)
479 {
480 	__mnt_drop_write(mnt);
481 	sb_end_write(mnt->mnt_sb);
482 }
483 EXPORT_SYMBOL_GPL(mnt_drop_write);
484 
485 void __mnt_drop_write_file(struct file *file)
486 {
487 	if (!(file->f_mode & FMODE_WRITER))
488 		__mnt_drop_write(file->f_path.mnt);
489 }
490 
491 void mnt_drop_write_file(struct file *file)
492 {
493 	__mnt_drop_write_file(file);
494 	sb_end_write(file_inode(file)->i_sb);
495 }
496 EXPORT_SYMBOL(mnt_drop_write_file);
497 
498 /**
499  * mnt_hold_writers - prevent write access to the given mount
500  * @mnt: mnt to prevent write access to
501  *
502  * Prevents write access to @mnt if there are no active writers for @mnt.
503  * This function needs to be called and return successfully before changing
504  * properties of @mnt that need to remain stable for callers with write access
505  * to @mnt.
506  *
507  * After this function has been called successfully, callers must pair it with
508  * a call to mnt_unhold_writers() in order to stop preventing write access to
509  * @mnt.
510  *
511  * Context: This function expects lock_mount_hash() to be held serializing
512  *          setting MNT_WRITE_HOLD.
513  * Return: On success 0 is returned.
514  *	   On error, -EBUSY is returned.
515  */
516 static inline int mnt_hold_writers(struct mount *mnt)
517 {
518 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
519 	/*
520 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
521 	 * should be visible before we do.
522 	 */
523 	smp_mb();
524 
525 	/*
526 	 * With writers on hold, if this value is zero, then there are
527 	 * definitely no active writers (although held writers may subsequently
528 	 * increment the count, they'll have to wait, and decrement it after
529 	 * seeing MNT_READONLY).
530 	 *
531 	 * It is OK to have counter incremented on one CPU and decremented on
532 	 * another: the sum will add up correctly. The danger would be when we
533 	 * sum up each counter, if we read a counter before it is incremented,
534 	 * but then read another CPU's count which it has been subsequently
535 	 * decremented from -- we would see more decrements than we should.
536 	 * MNT_WRITE_HOLD protects against this scenario, because
537 	 * mnt_want_write first increments count, then smp_mb, then spins on
538 	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
539 	 * we're counting up here.
540 	 */
541 	if (mnt_get_writers(mnt) > 0)
542 		return -EBUSY;
543 
544 	return 0;
545 }
546 
547 /**
548  * mnt_unhold_writers - stop preventing write access to the given mount
549  * @mnt: mnt to stop preventing write access to
550  *
551  * Stop preventing write access to @mnt allowing callers to gain write access
552  * to @mnt again.
553  *
554  * This function can only be called after a successful call to
555  * mnt_hold_writers().
556  *
557  * Context: This function expects lock_mount_hash() to be held.
558  */
559 static inline void mnt_unhold_writers(struct mount *mnt)
560 {
561 	/*
562 	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
563 	 * that become unheld will see MNT_READONLY.
564 	 */
565 	smp_wmb();
566 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
567 }
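
/*
 * mnt_make_readonly() below is the canonical pairing of the two helpers
 * above: hold writers, set MNT_READONLY only if nobody is mid-write,
 * then unhold.
 */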
568 
569 static int mnt_make_readonly(struct mount *mnt)
570 {
571 	int ret;
572 
573 	ret = mnt_hold_writers(mnt);
574 	if (!ret)
575 		mnt->mnt.mnt_flags |= MNT_READONLY;
576 	mnt_unhold_writers(mnt);
577 	return ret;
578 }
579 
580 int sb_prepare_remount_readonly(struct super_block *sb)
581 {
582 	struct mount *mnt;
583 	int err = 0;
584 
585 	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
586 	if (atomic_long_read(&sb->s_remove_count))
587 		return -EBUSY;
588 
589 	lock_mount_hash();
590 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
591 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
592 			err = mnt_hold_writers(mnt);
593 			if (err)
594 				break;
595 		}
596 	}
597 	if (!err && atomic_long_read(&sb->s_remove_count))
598 		err = -EBUSY;
599 
600 	if (!err)
601 		sb_start_ro_state_change(sb);
602 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
603 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
604 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
605 	}
606 	unlock_mount_hash();
607 
608 	return err;
609 }
610 
611 static void free_vfsmnt(struct mount *mnt)
612 {
613 	mnt_idmap_put(mnt_idmap(&mnt->mnt));
614 	kfree_const(mnt->mnt_devname);
615 #ifdef CONFIG_SMP
616 	free_percpu(mnt->mnt_pcp);
617 #endif
618 	kmem_cache_free(mnt_cache, mnt);
619 }
620 
621 static void delayed_free_vfsmnt(struct rcu_head *head)
622 {
623 	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
624 }
625 
626 /* call under rcu_read_lock */
627 int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
628 {
629 	struct mount *mnt;
630 	if (read_seqretry(&mount_lock, seq))
631 		return 1;
632 	if (bastard == NULL)
633 		return 0;
634 	mnt = real_mount(bastard);
635 	mnt_add_count(mnt, 1);
636 	smp_mb();			// see mntput_no_expire()
637 	if (likely(!read_seqretry(&mount_lock, seq)))
638 		return 0;
639 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
640 		mnt_add_count(mnt, -1);
641 		return 1;
642 	}
643 	lock_mount_hash();
644 	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
645 		mnt_add_count(mnt, -1);
646 		unlock_mount_hash();
647 		return 1;
648 	}
649 	unlock_mount_hash();
650 	/* caller will mntput() */
651 	return -1;
652 }
653 
654 /* call under rcu_read_lock */
655 static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
656 {
657 	int res = __legitimize_mnt(bastard, seq);
658 	if (likely(!res))
659 		return true;
660 	if (unlikely(res < 0)) {
661 		rcu_read_unlock();
662 		mntput(bastard);
663 		rcu_read_lock();
664 	}
665 	return false;
666 }
667 
668 /**
669  * __lookup_mnt - find first child mount
670  * @mnt:	parent mount
671  * @dentry:	mountpoint
672  *
673  * If @mnt has a child mount @c mounted at @dentry, find and return it.
674  *
675  * Note that the child mount @c need not be unique. There are cases
676  * where shadow mounts are created. For example, during mount
677  * propagation when a source mount @mnt whose root got overmounted by a
678  * mount @o after path lookup but before @namespace_sem could be
679  * acquired gets copied and propagated. So @mnt gets copied including
680  * @o. When @mnt is propagated to a destination mount @d that already
681  * has another mount @n mounted at the same mountpoint then the source
682  * mount @mnt will be tucked beneath @n, i.e., @n will be mounted on
683  * @mnt and @mnt mounted on @d. Now both @n and @o are mounted at @mnt
684  * on @dentry.
685  *
686  * Return: The first child of @mnt mounted at @dentry, or NULL.
687  */
688 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
689 {
690 	struct hlist_head *head = m_hash(mnt, dentry);
691 	struct mount *p;
692 
693 	hlist_for_each_entry_rcu(p, head, mnt_hash)
694 		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
695 			return p;
696 	return NULL;
697 }
698 
699 /*
700  * lookup_mnt - Return the first child mount mounted at path
701  *
702  * "First" means first mounted chronologically.  If you create the
703  * following mounts:
704  *
705  * mount /dev/sda1 /mnt
706  * mount /dev/sda2 /mnt
707  * mount /dev/sda3 /mnt
708  *
709  * Then lookup_mnt() on the base /mnt dentry in the root mount will
710  * return successively the root dentry and vfsmount of /dev/sda1, then
711  * /dev/sda2, then /dev/sda3, then NULL.
712  *
713  * lookup_mnt takes a reference to the found vfsmount.
714  */
715 struct vfsmount *lookup_mnt(const struct path *path)
716 {
717 	struct mount *child_mnt;
718 	struct vfsmount *m;
719 	unsigned seq;
720 
721 	rcu_read_lock();
722 	do {
723 		seq = read_seqbegin(&mount_lock);
724 		child_mnt = __lookup_mnt(path->mnt, path->dentry);
725 		m = child_mnt ? &child_mnt->mnt : NULL;
726 	} while (!legitimize_mnt(m, seq));
727 	rcu_read_unlock();
728 	return m;
729 }
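
/*
 * Usage sketch (illustrative; the helper is hypothetical): the caller
 * owns the reference lookup_mnt() takes and must drop it with mntput().
 */
static void __maybe_unused example_peek_child_mount(const struct path *path)
{
	struct vfsmount *child = lookup_mnt(path);

	if (child) {
		/* ... inspect child->mnt_root, child->mnt_flags ... */
		mntput(child);
	}
}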
730 
731 static inline void lock_ns_list(struct mnt_namespace *ns)
732 {
733 	spin_lock(&ns->ns_lock);
734 }
735 
736 static inline void unlock_ns_list(struct mnt_namespace *ns)
737 {
738 	spin_unlock(&ns->ns_lock);
739 }
740 
741 static inline bool mnt_is_cursor(struct mount *mnt)
742 {
743 	return mnt->mnt.mnt_flags & MNT_CURSOR;
744 }
745 
746 /*
747  * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
748  *                         current mount namespace.
749  *
750  * The common case is dentries are not mountpoints at all and that
751  * test is handled inline.  For the slow case when we are actually
752  * dealing with a mountpoint of some kind, walk through all of the
753  * mounts in the current mount namespace and test to see if the dentry
754  * is a mountpoint.
755  *
756  * The mount_hashtable is not usable in this context because we
757  * need to identify all mounts that may be in the current mount
758  * namespace, not just a mount that happens to have some specified
759  * parent mount.
760  */
761 bool __is_local_mountpoint(struct dentry *dentry)
762 {
763 	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
764 	struct mount *mnt;
765 	bool is_covered = false;
766 
767 	down_read(&namespace_sem);
768 	lock_ns_list(ns);
769 	list_for_each_entry(mnt, &ns->list, mnt_list) {
770 		if (mnt_is_cursor(mnt))
771 			continue;
772 		is_covered = (mnt->mnt_mountpoint == dentry);
773 		if (is_covered)
774 			break;
775 	}
776 	unlock_ns_list(ns);
777 	up_read(&namespace_sem);
778 
779 	return is_covered;
780 }
781 
782 static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
783 {
784 	struct hlist_head *chain = mp_hash(dentry);
785 	struct mountpoint *mp;
786 
787 	hlist_for_each_entry(mp, chain, m_hash) {
788 		if (mp->m_dentry == dentry) {
789 			mp->m_count++;
790 			return mp;
791 		}
792 	}
793 	return NULL;
794 }
795 
796 static struct mountpoint *get_mountpoint(struct dentry *dentry)
797 {
798 	struct mountpoint *mp, *new = NULL;
799 	int ret;
800 
801 	if (d_mountpoint(dentry)) {
802 		/* might be worth a WARN_ON() */
803 		if (d_unlinked(dentry))
804 			return ERR_PTR(-ENOENT);
805 mountpoint:
806 		read_seqlock_excl(&mount_lock);
807 		mp = lookup_mountpoint(dentry);
808 		read_sequnlock_excl(&mount_lock);
809 		if (mp)
810 			goto done;
811 	}
812 
813 	if (!new)
814 		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
815 	if (!new)
816 		return ERR_PTR(-ENOMEM);
817 
818 
819 	/* Exactly one process may set d_mounted */
820 	ret = d_set_mounted(dentry);
821 
822 	/* Someone else set d_mounted? */
823 	if (ret == -EBUSY)
824 		goto mountpoint;
825 
826 	/* The dentry is not available as a mountpoint? */
827 	mp = ERR_PTR(ret);
828 	if (ret)
829 		goto done;
830 
831 	/* Add the new mountpoint to the hash table */
832 	read_seqlock_excl(&mount_lock);
833 	new->m_dentry = dget(dentry);
834 	new->m_count = 1;
835 	hlist_add_head(&new->m_hash, mp_hash(dentry));
836 	INIT_HLIST_HEAD(&new->m_list);
837 	read_sequnlock_excl(&mount_lock);
838 
839 	mp = new;
840 	new = NULL;
841 done:
842 	kfree(new);
843 	return mp;
844 }
845 
846 /*
847  * vfsmount lock must be held.  Additionally, the caller is responsible
848  * for serializing calls for a given disposal list.
849  */
850 static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
851 {
852 	if (!--mp->m_count) {
853 		struct dentry *dentry = mp->m_dentry;
854 		BUG_ON(!hlist_empty(&mp->m_list));
855 		spin_lock(&dentry->d_lock);
856 		dentry->d_flags &= ~DCACHE_MOUNTED;
857 		spin_unlock(&dentry->d_lock);
858 		dput_to_list(dentry, list);
859 		hlist_del(&mp->m_hash);
860 		kfree(mp);
861 	}
862 }
863 
864 /* called with namespace_lock and vfsmount lock */
865 static void put_mountpoint(struct mountpoint *mp)
866 {
867 	__put_mountpoint(mp, &ex_mountpoints);
868 }
869 
870 static inline int check_mnt(struct mount *mnt)
871 {
872 	return mnt->mnt_ns == current->nsproxy->mnt_ns;
873 }
874 
875 /*
876  * vfsmount lock must be held for write
877  */
878 static void touch_mnt_namespace(struct mnt_namespace *ns)
879 {
880 	if (ns) {
881 		ns->event = ++event;
882 		wake_up_interruptible(&ns->poll);
883 	}
884 }
885 
886 /*
887  * vfsmount lock must be held for write
888  */
889 static void __touch_mnt_namespace(struct mnt_namespace *ns)
890 {
891 	if (ns && ns->event != event) {
892 		ns->event = event;
893 		wake_up_interruptible(&ns->poll);
894 	}
895 }
896 
897 /*
898  * vfsmount lock must be held for write
899  */
900 static struct mountpoint *unhash_mnt(struct mount *mnt)
901 {
902 	struct mountpoint *mp;
903 	mnt->mnt_parent = mnt;
904 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
905 	list_del_init(&mnt->mnt_child);
906 	hlist_del_init_rcu(&mnt->mnt_hash);
907 	hlist_del_init(&mnt->mnt_mp_list);
908 	mp = mnt->mnt_mp;
909 	mnt->mnt_mp = NULL;
910 	return mp;
911 }
912 
913 /*
914  * vfsmount lock must be held for write
915  */
916 static void umount_mnt(struct mount *mnt)
917 {
918 	put_mountpoint(unhash_mnt(mnt));
919 }
920 
921 /*
922  * vfsmount lock must be held for write
923  */
924 void mnt_set_mountpoint(struct mount *mnt,
925 			struct mountpoint *mp,
926 			struct mount *child_mnt)
927 {
928 	mp->m_count++;
929 	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
930 	child_mnt->mnt_mountpoint = mp->m_dentry;
931 	child_mnt->mnt_parent = mnt;
932 	child_mnt->mnt_mp = mp;
933 	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
934 }
935 
936 /**
937  * mnt_set_mountpoint_beneath - mount a mount beneath another one
938  *
939  * @new_parent: the source mount
940  * @top_mnt:    the mount beneath which @new_parent is mounted
941  * @new_mp:     the new mountpoint of @top_mnt on @new_parent
942  *
943  * Remove @top_mnt from its current mountpoint @top_mnt->mnt_mp and
944  * parent @top_mnt->mnt_parent and mount it on top of @new_parent at
945  * @new_mp. And mount @new_parent on the old parent and old
946  * mountpoint of @top_mnt.
947  *
948  * Context: This function expects namespace_lock() and lock_mount_hash()
949  *          to have been acquired in that order.
950  */
951 static void mnt_set_mountpoint_beneath(struct mount *new_parent,
952 				       struct mount *top_mnt,
953 				       struct mountpoint *new_mp)
954 {
955 	struct mount *old_top_parent = top_mnt->mnt_parent;
956 	struct mountpoint *old_top_mp = top_mnt->mnt_mp;
957 
958 	mnt_set_mountpoint(old_top_parent, old_top_mp, new_parent);
959 	mnt_change_mountpoint(new_parent, new_mp, top_mnt);
960 }
961 
962 
963 static void __attach_mnt(struct mount *mnt, struct mount *parent)
964 {
965 	hlist_add_head_rcu(&mnt->mnt_hash,
966 			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
967 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
968 }
969 
970 /**
971  * attach_mnt - mount a mount, attach to @mount_hashtable and parent's
972  *              list of child mounts
973  * @parent:  the parent
974  * @mnt:     the new mount
975  * @mp:      the new mountpoint
976  * @beneath: whether to mount @mnt beneath or on top of @parent
977  *
978  * If @beneath is false, mount @mnt at @mp on @parent. Then attach @mnt
979  * to @parent's child mount list and to @mount_hashtable.
980  *
981  * If @beneath is true, remove @mnt from its current parent and
982  * mountpoint and mount it on @mp on @parent, and mount @parent on the
983  * old parent and old mountpoint of @mnt. Finally, attach @parent to
984  * @mnt_hashtable and @parent->mnt_parent->mnt_mounts.
985  *
986  * Note, when __attach_mnt() is called @mnt->mnt_parent already points
987  * to the correct parent.
988  *
989  * Context: This function expects namespace_lock() and lock_mount_hash()
990  *          to have been acquired in that order.
991  */
992 static void attach_mnt(struct mount *mnt, struct mount *parent,
993 		       struct mountpoint *mp, bool beneath)
994 {
995 	if (beneath)
996 		mnt_set_mountpoint_beneath(mnt, parent, mp);
997 	else
998 		mnt_set_mountpoint(parent, mp, mnt);
999 	/*
1000 	 * Note, @mnt->mnt_parent has to be used. If @mnt was mounted
1001 	 * beneath @parent then @mnt will need to be attached to
1002 	 * @parent's old parent, not @parent. IOW, @mnt->mnt_parent
1003 	 * isn't the same mount as @parent.
1004 	 */
1005 	__attach_mnt(mnt, mnt->mnt_parent);
1006 }
1007 
1008 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
1009 {
1010 	struct mountpoint *old_mp = mnt->mnt_mp;
1011 	struct mount *old_parent = mnt->mnt_parent;
1012 
1013 	list_del_init(&mnt->mnt_child);
1014 	hlist_del_init(&mnt->mnt_mp_list);
1015 	hlist_del_init_rcu(&mnt->mnt_hash);
1016 
1017 	attach_mnt(mnt, parent, mp, false);
1018 
1019 	put_mountpoint(old_mp);
1020 	mnt_add_count(old_parent, -1);
1021 }
1022 
1023 /*
1024  * vfsmount lock must be held for write
1025  */
1026 static void commit_tree(struct mount *mnt)
1027 {
1028 	struct mount *parent = mnt->mnt_parent;
1029 	struct mount *m;
1030 	LIST_HEAD(head);
1031 	struct mnt_namespace *n = parent->mnt_ns;
1032 
1033 	BUG_ON(parent == mnt);
1034 
1035 	list_add_tail(&head, &mnt->mnt_list);
1036 	list_for_each_entry(m, &head, mnt_list)
1037 		m->mnt_ns = n;
1038 
1039 	list_splice(&head, n->list.prev);
1040 
1041 	n->mounts += n->pending_mounts;
1042 	n->pending_mounts = 0;
1043 
1044 	__attach_mnt(mnt, parent);
1045 	touch_mnt_namespace(n);
1046 }
1047 
1048 static struct mount *next_mnt(struct mount *p, struct mount *root)
1049 {
1050 	struct list_head *next = p->mnt_mounts.next;
1051 	if (next == &p->mnt_mounts) {
1052 		while (1) {
1053 			if (p == root)
1054 				return NULL;
1055 			next = p->mnt_child.next;
1056 			if (next != &p->mnt_parent->mnt_mounts)
1057 				break;
1058 			p = p->mnt_parent;
1059 		}
1060 	}
1061 	return list_entry(next, struct mount, mnt_child);
1062 }
1063 
1064 static struct mount *skip_mnt_tree(struct mount *p)
1065 {
1066 	struct list_head *prev = p->mnt_mounts.prev;
1067 	while (prev != &p->mnt_mounts) {
1068 		p = list_entry(prev, struct mount, mnt_child);
1069 		prev = p->mnt_mounts.prev;
1070 	}
1071 	return p;
1072 }
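
/*
 * Iteration sketch (illustrative): next_mnt() yields a depth-first,
 * preorder walk of a mount tree,
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...;
 *
 * while skip_mnt_tree(p) returns the last mount in p's subtree, so the
 * following next_mnt() step leaves the subtree entirely (see its use in
 * copy_tree() below).
 */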
1073 
1074 /**
1075  * vfs_create_mount - Create a mount for a configured superblock
1076  * @fc: The configuration context with the superblock attached
1077  *
1078  * Create a mount to an already configured superblock.  If necessary, the
1079  * caller should invoke vfs_get_tree() before calling this.
1080  *
1081  * Note that this does not attach the mount to anything.
1082  */
1083 struct vfsmount *vfs_create_mount(struct fs_context *fc)
1084 {
1085 	struct mount *mnt;
1086 
1087 	if (!fc->root)
1088 		return ERR_PTR(-EINVAL);
1089 
1090 	mnt = alloc_vfsmnt(fc->source ?: "none");
1091 	if (!mnt)
1092 		return ERR_PTR(-ENOMEM);
1093 
1094 	if (fc->sb_flags & SB_KERNMOUNT)
1095 		mnt->mnt.mnt_flags = MNT_INTERNAL;
1096 
1097 	atomic_inc(&fc->root->d_sb->s_active);
1098 	mnt->mnt.mnt_sb		= fc->root->d_sb;
1099 	mnt->mnt.mnt_root	= dget(fc->root);
1100 	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
1101 	mnt->mnt_parent		= mnt;
1102 
1103 	lock_mount_hash();
1104 	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
1105 	unlock_mount_hash();
1106 	return &mnt->mnt;
1107 }
1108 EXPORT_SYMBOL(vfs_create_mount);
1109 
1110 struct vfsmount *fc_mount(struct fs_context *fc)
1111 {
1112 	int err = vfs_get_tree(fc);
1113 	if (!err) {
1114 		up_write(&fc->root->d_sb->s_umount);
1115 		return vfs_create_mount(fc);
1116 	}
1117 	return ERR_PTR(err);
1118 }
1119 EXPORT_SYMBOL(fc_mount);
1120 
1121 struct vfsmount *vfs_kern_mount(struct file_system_type *type,
1122 				int flags, const char *name,
1123 				void *data)
1124 {
1125 	struct fs_context *fc;
1126 	struct vfsmount *mnt;
1127 	int ret = 0;
1128 
1129 	if (!type)
1130 		return ERR_PTR(-EINVAL);
1131 
1132 	fc = fs_context_for_mount(type, flags);
1133 	if (IS_ERR(fc))
1134 		return ERR_CAST(fc);
1135 
1136 	if (name)
1137 		ret = vfs_parse_fs_string(fc, "source",
1138 					  name, strlen(name));
1139 	if (!ret)
1140 		ret = parse_monolithic_mount_data(fc, data);
1141 	if (!ret)
1142 		mnt = fc_mount(fc);
1143 	else
1144 		mnt = ERR_PTR(ret);
1145 
1146 	put_fs_context(fc);
1147 	return mnt;
1148 }
1149 EXPORT_SYMBOL_GPL(vfs_kern_mount);
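
/*
 * Usage sketch (illustrative; the helper is hypothetical): an in-kernel
 * mount of a registered filesystem type, roughly what kern_mount()
 * does elsewhere in this file.
 */
static struct vfsmount * __maybe_unused
example_internal_mount(struct file_system_type *type)
{
	return vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
}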
1150 
1151 struct vfsmount *
1152 vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
1153 	     const char *name, void *data)
1154 {
1155 	/* Until it is worked out how to pass the user namespace
1156 	 * through from the parent mount to the submount, don't support
1157 	 * unprivileged mounts with submounts.
1158 	 */
1159 	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
1160 		return ERR_PTR(-EPERM);
1161 
1162 	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
1163 }
1164 EXPORT_SYMBOL_GPL(vfs_submount);
1165 
1166 static struct mount *clone_mnt(struct mount *old, struct dentry *root,
1167 					int flag)
1168 {
1169 	struct super_block *sb = old->mnt.mnt_sb;
1170 	struct mount *mnt;
1171 	int err;
1172 
1173 	mnt = alloc_vfsmnt(old->mnt_devname);
1174 	if (!mnt)
1175 		return ERR_PTR(-ENOMEM);
1176 
1177 	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
1178 		mnt->mnt_group_id = 0; /* not a peer of original */
1179 	else
1180 		mnt->mnt_group_id = old->mnt_group_id;
1181 
1182 	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
1183 		err = mnt_alloc_group_id(mnt);
1184 		if (err)
1185 			goto out_free;
1186 	}
1187 
1188 	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1189 	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1190 
1191 	atomic_inc(&sb->s_active);
1192 	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));
1193 
1194 	mnt->mnt.mnt_sb = sb;
1195 	mnt->mnt.mnt_root = dget(root);
1196 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1197 	mnt->mnt_parent = mnt;
1198 	lock_mount_hash();
1199 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
1200 	unlock_mount_hash();
1201 
1202 	if ((flag & CL_SLAVE) ||
1203 	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
1204 		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
1205 		mnt->mnt_master = old;
1206 		CLEAR_MNT_SHARED(mnt);
1207 	} else if (!(flag & CL_PRIVATE)) {
1208 		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
1209 			list_add(&mnt->mnt_share, &old->mnt_share);
1210 		if (IS_MNT_SLAVE(old))
1211 			list_add(&mnt->mnt_slave, &old->mnt_slave);
1212 		mnt->mnt_master = old->mnt_master;
1213 	} else {
1214 		CLEAR_MNT_SHARED(mnt);
1215 	}
1216 	if (flag & CL_MAKE_SHARED)
1217 		set_mnt_shared(mnt);
1218 
1219 	/* stick the duplicate mount on the same expiry list
1220 	 * as the original if that was on one */
1221 	if (flag & CL_EXPIRE) {
1222 		if (!list_empty(&old->mnt_expire))
1223 			list_add(&mnt->mnt_expire, &old->mnt_expire);
1224 	}
1225 
1226 	return mnt;
1227 
1228  out_free:
1229 	mnt_free_id(mnt);
1230 	free_vfsmnt(mnt);
1231 	return ERR_PTR(err);
1232 }
1233 
1234 static void cleanup_mnt(struct mount *mnt)
1235 {
1236 	struct hlist_node *p;
1237 	struct mount *m;
1238 	/*
1239 	 * The warning here probably indicates that somebody messed
1240 	 * up a mnt_want/drop_write() pair.  If this happens, the
1241 	 * filesystem was probably unable to make r/w->r/o transitions.
1242 	 * The locking used to deal with mnt_count decrement provides barriers,
1243 	 * so mnt_get_writers() below is safe.
1244 	 */
1245 	WARN_ON(mnt_get_writers(mnt));
1246 	if (unlikely(mnt->mnt_pins.first))
1247 		mnt_pin_kill(mnt);
1248 	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
1249 		hlist_del(&m->mnt_umount);
1250 		mntput(&m->mnt);
1251 	}
1252 	fsnotify_vfsmount_delete(&mnt->mnt);
1253 	dput(mnt->mnt.mnt_root);
1254 	deactivate_super(mnt->mnt.mnt_sb);
1255 	mnt_free_id(mnt);
1256 	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
1257 }
1258 
1259 static void __cleanup_mnt(struct rcu_head *head)
1260 {
1261 	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
1262 }
1263 
1264 static LLIST_HEAD(delayed_mntput_list);
1265 static void delayed_mntput(struct work_struct *unused)
1266 {
1267 	struct llist_node *node = llist_del_all(&delayed_mntput_list);
1268 	struct mount *m, *t;
1269 
1270 	llist_for_each_entry_safe(m, t, node, mnt_llist)
1271 		cleanup_mnt(m);
1272 }
1273 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
1274 
1275 static void mntput_no_expire(struct mount *mnt)
1276 {
1277 	LIST_HEAD(list);
1278 	int count;
1279 
1280 	rcu_read_lock();
1281 	if (likely(READ_ONCE(mnt->mnt_ns))) {
1282 		/*
1283 		 * Since we don't do lock_mount_hash() here,
1284 		 * ->mnt_ns can change under us.  However, if it's
1285 		 * non-NULL, then there's a reference that won't
1286 		 * be dropped until after an RCU delay done after
1287 		 * turning ->mnt_ns NULL.  So if we observe it
1288 		 * non-NULL under rcu_read_lock(), the reference
1289 		 * we are dropping is not the final one.
1290 		 */
1291 		mnt_add_count(mnt, -1);
1292 		rcu_read_unlock();
1293 		return;
1294 	}
1295 	lock_mount_hash();
1296 	/*
1297 	 * make sure that if __legitimize_mnt() has not seen us grab
1298 	 * mount_lock, we'll see their refcount increment here.
1299 	 */
1300 	smp_mb();
1301 	mnt_add_count(mnt, -1);
1302 	count = mnt_get_count(mnt);
1303 	if (count != 0) {
1304 		WARN_ON(count < 0);
1305 		rcu_read_unlock();
1306 		unlock_mount_hash();
1307 		return;
1308 	}
1309 	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
1310 		rcu_read_unlock();
1311 		unlock_mount_hash();
1312 		return;
1313 	}
1314 	mnt->mnt.mnt_flags |= MNT_DOOMED;
1315 	rcu_read_unlock();
1316 
1317 	list_del(&mnt->mnt_instance);
1318 
1319 	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
1320 		struct mount *p, *tmp;
1321 		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
1322 			__put_mountpoint(unhash_mnt(p), &list);
1323 			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
1324 		}
1325 	}
1326 	unlock_mount_hash();
1327 	shrink_dentry_list(&list);
1328 
1329 	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
1330 		struct task_struct *task = current;
1331 		if (likely(!(task->flags & PF_KTHREAD))) {
1332 			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
1333 			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
1334 				return;
1335 		}
1336 		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
1337 			schedule_delayed_work(&delayed_mntput_work, 1);
1338 		return;
1339 	}
1340 	cleanup_mnt(mnt);
1341 }
1342 
1343 void mntput(struct vfsmount *mnt)
1344 {
1345 	if (mnt) {
1346 		struct mount *m = real_mount(mnt);
1347 		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
1348 		if (unlikely(m->mnt_expiry_mark))
1349 			m->mnt_expiry_mark = 0;
1350 		mntput_no_expire(m);
1351 	}
1352 }
1353 EXPORT_SYMBOL(mntput);
1354 
1355 struct vfsmount *mntget(struct vfsmount *mnt)
1356 {
1357 	if (mnt)
1358 		mnt_add_count(real_mount(mnt), 1);
1359 	return mnt;
1360 }
1361 EXPORT_SYMBOL(mntget);
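
/*
 * Reference-count sketch: every mntget() (and every reference returned
 * by lookup_mnt() or fc_mount()) must eventually be balanced by a
 * mntput(); once the mount has left its namespace, the final mntput()
 * triggers the teardown in mntput_no_expire() above.
 */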
1362 
1363 /*
1364  * Make a mount point inaccessible to new lookups.
1365  * Because there may still be current users, the caller MUST WAIT
1366  * for an RCU grace period before destroying the mount point.
1367  */
1368 void mnt_make_shortterm(struct vfsmount *mnt)
1369 {
1370 	if (mnt)
1371 		real_mount(mnt)->mnt_ns = NULL;
1372 }
1373 
1374 /**
1375  * path_is_mountpoint() - Check if path is a mount in the current namespace.
1376  * @path: path to check
1377  *
1378  *  d_mountpoint() can only be used reliably to establish if a dentry is
1379  *  not mounted in any namespace and that common case is handled inline.
1380  *  d_mountpoint() isn't aware of the possibility there may be multiple
1381  *  mounts using a given dentry in a different namespace. This function
1382  *  checks if the passed in path is a mountpoint rather than the dentry
1383  *  alone.
1384  */
1385 bool path_is_mountpoint(const struct path *path)
1386 {
1387 	unsigned seq;
1388 	bool res;
1389 
1390 	if (!d_mountpoint(path->dentry))
1391 		return false;
1392 
1393 	rcu_read_lock();
1394 	do {
1395 		seq = read_seqbegin(&mount_lock);
1396 		res = __path_is_mountpoint(path);
1397 	} while (read_seqretry(&mount_lock, seq));
1398 	rcu_read_unlock();
1399 
1400 	return res;
1401 }
1402 EXPORT_SYMBOL(path_is_mountpoint);
1403 
1404 struct vfsmount *mnt_clone_internal(const struct path *path)
1405 {
1406 	struct mount *p;
1407 	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1408 	if (IS_ERR(p))
1409 		return ERR_CAST(p);
1410 	p->mnt.mnt_flags |= MNT_INTERNAL;
1411 	return &p->mnt;
1412 }
1413 
1414 #ifdef CONFIG_PROC_FS
1415 static struct mount *mnt_list_next(struct mnt_namespace *ns,
1416 				   struct list_head *p)
1417 {
1418 	struct mount *mnt, *ret = NULL;
1419 
1420 	lock_ns_list(ns);
1421 	list_for_each_continue(p, &ns->list) {
1422 		mnt = list_entry(p, typeof(*mnt), mnt_list);
1423 		if (!mnt_is_cursor(mnt)) {
1424 			ret = mnt;
1425 			break;
1426 		}
1427 	}
1428 	unlock_ns_list(ns);
1429 
1430 	return ret;
1431 }
1432 
1433 /* iterator; we want it to have access to namespace_sem, thus here... */
1434 static void *m_start(struct seq_file *m, loff_t *pos)
1435 {
1436 	struct proc_mounts *p = m->private;
1437 	struct list_head *prev;
1438 
1439 	down_read(&namespace_sem);
1440 	if (!*pos) {
1441 		prev = &p->ns->list;
1442 	} else {
1443 		prev = &p->cursor.mnt_list;
1444 
1445 		/* Read after we'd reached the end? */
1446 		if (list_empty(prev))
1447 			return NULL;
1448 	}
1449 
1450 	return mnt_list_next(p->ns, prev);
1451 }
1452 
1453 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1454 {
1455 	struct proc_mounts *p = m->private;
1456 	struct mount *mnt = v;
1457 
1458 	++*pos;
1459 	return mnt_list_next(p->ns, &mnt->mnt_list);
1460 }
1461 
1462 static void m_stop(struct seq_file *m, void *v)
1463 {
1464 	struct proc_mounts *p = m->private;
1465 	struct mount *mnt = v;
1466 
1467 	lock_ns_list(p->ns);
1468 	if (mnt)
1469 		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
1470 	else
1471 		list_del_init(&p->cursor.mnt_list);
1472 	unlock_ns_list(p->ns);
1473 	up_read(&namespace_sem);
1474 }
1475 
1476 static int m_show(struct seq_file *m, void *v)
1477 {
1478 	struct proc_mounts *p = m->private;
1479 	struct mount *r = v;
1480 	return p->show(m, &r->mnt);
1481 }
1482 
1483 const struct seq_operations mounts_op = {
1484 	.start	= m_start,
1485 	.next	= m_next,
1486 	.stop	= m_stop,
1487 	.show	= m_show,
1488 };
1489 
1490 void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
1491 {
1492 	down_read(&namespace_sem);
1493 	lock_ns_list(ns);
1494 	list_del(&cursor->mnt_list);
1495 	unlock_ns_list(ns);
1496 	up_read(&namespace_sem);
1497 }
1498 #endif  /* CONFIG_PROC_FS */
1499 
1500 /**
1501  * may_umount_tree - check if a mount tree is busy
1502  * @m: root of mount tree
1503  *
1504  * This is called to check if a tree of mounts has any
1505  * open files, pwds, chroots or sub mounts that are
1506  * busy.
1507  */
1508 int may_umount_tree(struct vfsmount *m)
1509 {
1510 	struct mount *mnt = real_mount(m);
1511 	int actual_refs = 0;
1512 	int minimum_refs = 0;
1513 	struct mount *p;
1514 	BUG_ON(!m);
1515 
1516 	/* write lock needed for mnt_get_count */
1517 	lock_mount_hash();
1518 	for (p = mnt; p; p = next_mnt(p, mnt)) {
1519 		actual_refs += mnt_get_count(p);
1520 		minimum_refs += 2;
1521 	}
1522 	unlock_mount_hash();
1523 
1524 	if (actual_refs > minimum_refs)
1525 		return 0;
1526 
1527 	return 1;
1528 }
1529 
1530 EXPORT_SYMBOL(may_umount_tree);
1531 
1532 /**
1533  * may_umount - check if a mount point is busy
1534  * @mnt: root of mount
1535  *
1536  * This is called to check if a mount point has any
1537  * open files, pwds, chroots or sub mounts. If the
1538  * mount has sub mounts this will return busy
1539  * regardless of whether the sub mounts are busy.
1540  *
1541  * Doesn't take quota and stuff into account. IOW, in some cases it will
1542  * give false negatives. The main reason why it's here is that we need
1543  * a non-destructive way to look for easily umountable filesystems.
1544  */
1545 int may_umount(struct vfsmount *mnt)
1546 {
1547 	int ret = 1;
1548 	down_read(&namespace_sem);
1549 	lock_mount_hash();
1550 	if (propagate_mount_busy(real_mount(mnt), 2))
1551 		ret = 0;
1552 	unlock_mount_hash();
1553 	up_read(&namespace_sem);
1554 	return ret;
1555 }
1556 
1557 EXPORT_SYMBOL(may_umount);
1558 
1559 static void namespace_unlock(void)
1560 {
1561 	struct hlist_head head;
1562 	struct hlist_node *p;
1563 	struct mount *m;
1564 	LIST_HEAD(list);
1565 
1566 	hlist_move_list(&unmounted, &head);
1567 	list_splice_init(&ex_mountpoints, &list);
1568 
1569 	up_write(&namespace_sem);
1570 
1571 	shrink_dentry_list(&list);
1572 
1573 	if (likely(hlist_empty(&head)))
1574 		return;
1575 
1576 	synchronize_rcu_expedited();
1577 
1578 	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
1579 		hlist_del(&m->mnt_umount);
1580 		mntput(&m->mnt);
1581 	}
1582 }
1583 
1584 static inline void namespace_lock(void)
1585 {
1586 	down_write(&namespace_sem);
1587 }
1588 
1589 enum umount_tree_flags {
1590 	UMOUNT_SYNC = 1,
1591 	UMOUNT_PROPAGATE = 2,
1592 	UMOUNT_CONNECTED = 4,
1593 };
1594 
1595 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1596 {
1597 	/* Leaving mounts connected is only valid for lazy umounts */
1598 	if (how & UMOUNT_SYNC)
1599 		return true;
1600 
1601 	/* A mount without a parent has nothing to be connected to */
1602 	if (!mnt_has_parent(mnt))
1603 		return true;
1604 
1605 	/* Because the reference counting rules change when mounts are
1606 	 * unmounted and connected, umounted mounts may not be
1607 	 * connected to mounted mounts.
1608 	 */
1609 	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1610 		return true;
1611 
1612 	/* Has it been requested that the mount remain connected? */
1613 	if (how & UMOUNT_CONNECTED)
1614 		return false;
1615 
1616 	/* Is the mount locked such that it needs to remain connected? */
1617 	if (IS_MNT_LOCKED(mnt))
1618 		return false;
1619 
1620 	/* By default disconnect the mount */
1621 	return true;
1622 }
1623 
1624 /*
1625  * mount_lock must be held
1626  * namespace_sem must be held for write
1627  */
1628 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1629 {
1630 	LIST_HEAD(tmp_list);
1631 	struct mount *p;
1632 
1633 	if (how & UMOUNT_PROPAGATE)
1634 		propagate_mount_unlock(mnt);
1635 
1636 	/* Gather the mounts to umount */
1637 	for (p = mnt; p; p = next_mnt(p, mnt)) {
1638 		p->mnt.mnt_flags |= MNT_UMOUNT;
1639 		list_move(&p->mnt_list, &tmp_list);
1640 	}
1641 
1642 	/* Hide the mounts from mnt_mounts */
1643 	list_for_each_entry(p, &tmp_list, mnt_list) {
1644 		list_del_init(&p->mnt_child);
1645 	}
1646 
1647 	/* Add propagated mounts to the tmp_list */
1648 	if (how & UMOUNT_PROPAGATE)
1649 		propagate_umount(&tmp_list);
1650 
1651 	while (!list_empty(&tmp_list)) {
1652 		struct mnt_namespace *ns;
1653 		bool disconnect;
1654 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
1655 		list_del_init(&p->mnt_expire);
1656 		list_del_init(&p->mnt_list);
1657 		ns = p->mnt_ns;
1658 		if (ns) {
1659 			ns->mounts--;
1660 			__touch_mnt_namespace(ns);
1661 		}
1662 		p->mnt_ns = NULL;
1663 		if (how & UMOUNT_SYNC)
1664 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1665 
1666 		disconnect = disconnect_mount(p, how);
1667 		if (mnt_has_parent(p)) {
1668 			mnt_add_count(p->mnt_parent, -1);
1669 			if (!disconnect) {
1670 				/* Don't forget about p */
1671 				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
1672 			} else {
1673 				umount_mnt(p);
1674 			}
1675 		}
1676 		change_mnt_propagation(p, MS_PRIVATE);
1677 		if (disconnect)
1678 			hlist_add_head(&p->mnt_umount, &unmounted);
1679 	}
1680 }
1681 
1682 static void shrink_submounts(struct mount *mnt);
1683 
1684 static int do_umount_root(struct super_block *sb)
1685 {
1686 	int ret = 0;
1687 
1688 	down_write(&sb->s_umount);
1689 	if (!sb_rdonly(sb)) {
1690 		struct fs_context *fc;
1691 
1692 		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
1693 						SB_RDONLY);
1694 		if (IS_ERR(fc)) {
1695 			ret = PTR_ERR(fc);
1696 		} else {
1697 			ret = parse_monolithic_mount_data(fc, NULL);
1698 			if (!ret)
1699 				ret = reconfigure_super(fc);
1700 			put_fs_context(fc);
1701 		}
1702 	}
1703 	up_write(&sb->s_umount);
1704 	return ret;
1705 }
1706 
1707 static int do_umount(struct mount *mnt, int flags)
1708 {
1709 	struct super_block *sb = mnt->mnt.mnt_sb;
1710 	int retval;
1711 
1712 	retval = security_sb_umount(&mnt->mnt, flags);
1713 	if (retval)
1714 		return retval;
1715 
1716 	/*
1717 	 * Allow userspace to request a mountpoint be expired rather than
1718 	 * unmounting unconditionally. Unmount only happens if:
1719 	 *  (1) the mark is already set (the mark is cleared by mntput())
1720 	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1721 	 */
1722 	if (flags & MNT_EXPIRE) {
1723 		if (&mnt->mnt == current->fs->root.mnt ||
1724 		    flags & (MNT_FORCE | MNT_DETACH))
1725 			return -EINVAL;
1726 
1727 		/*
1728 		 * probably don't strictly need the lock here if we examined
1729 		 * all race cases, but it's a slowpath.
1730 		 */
1731 		lock_mount_hash();
1732 		if (mnt_get_count(mnt) != 2) {
1733 			unlock_mount_hash();
1734 			return -EBUSY;
1735 		}
1736 		unlock_mount_hash();
1737 
1738 		if (!xchg(&mnt->mnt_expiry_mark, 1))
1739 			return -EAGAIN;
1740 	}
1741 
1742 	/*
1743 	 * If we may have to abort operations to get out of this
1744 	 * mount, and they will themselves hold resources, we must
1745 	 * allow the fs to do things. In the Unix tradition of
1746 	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
1747 	 * might fail to complete on the first run through as other tasks
1748 	 * must return, and the like. That's for the mount program to worry
1749 	 * about for the moment.
1750 	 */
1751 
1752 	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1753 		sb->s_op->umount_begin(sb);
1754 	}
1755 
1756 	/*
1757 	 * No sense to grab the lock for this test, but the test itself looks
1758 	 * somewhat bogus. Suggestions for better replacement?
1759 	 * Ho-hum... In principle, we might treat that as umount + switch
1760 	 * to rootfs. GC would eventually take care of the old vfsmount.
1761 	 * Actually it makes sense, especially if rootfs would contain a
1762 	 * /reboot - static binary that would close all descriptors and
1763 	 * call reboot(2). Then init(8) could umount root and exec /reboot.
1764 	 */
1765 	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1766 		/*
1767 		 * Special case for "unmounting" root ...
1768 		 * we just try to remount it readonly.
1769 		 */
1770 		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
1771 			return -EPERM;
1772 		return do_umount_root(sb);
1773 	}
1774 
1775 	namespace_lock();
1776 	lock_mount_hash();
1777 
1778 	/* Recheck MNT_LOCKED with the locks held */
1779 	retval = -EINVAL;
1780 	if (mnt->mnt.mnt_flags & MNT_LOCKED)
1781 		goto out;
1782 
1783 	event++;
1784 	if (flags & MNT_DETACH) {
1785 		if (!list_empty(&mnt->mnt_list))
1786 			umount_tree(mnt, UMOUNT_PROPAGATE);
1787 		retval = 0;
1788 	} else {
1789 		shrink_submounts(mnt);
1790 		retval = -EBUSY;
1791 		if (!propagate_mount_busy(mnt, 2)) {
1792 			if (!list_empty(&mnt->mnt_list))
1793 				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1794 			retval = 0;
1795 		}
1796 	}
1797 out:
1798 	unlock_mount_hash();
1799 	namespace_unlock();
1800 	return retval;
1801 }
1802 
1803 /*
1804  * __detach_mounts - lazily unmount all mounts on the specified dentry
1805  *
1806  * During unlink, rmdir, and d_drop it is possible to lose the path
1807  * to an existing mountpoint, and wind up leaking the mount.
1808  * detach_mounts allows lazily unmounting those mounts instead of
1809  * leaking them.
1810  *
1811  * The caller may hold dentry->d_inode->i_mutex.
1812  */
1813 void __detach_mounts(struct dentry *dentry)
1814 {
1815 	struct mountpoint *mp;
1816 	struct mount *mnt;
1817 
1818 	namespace_lock();
1819 	lock_mount_hash();
1820 	mp = lookup_mountpoint(dentry);
1821 	if (!mp)
1822 		goto out_unlock;
1823 
1824 	event++;
1825 	while (!hlist_empty(&mp->m_list)) {
1826 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1827 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1828 			umount_mnt(mnt);
1829 			hlist_add_head(&mnt->mnt_umount, &unmounted);
1830 		}
1831 		else umount_tree(mnt, UMOUNT_CONNECTED);
1832 	}
1833 	put_mountpoint(mp);
1834 out_unlock:
1835 	unlock_mount_hash();
1836 	namespace_unlock();
1837 }
1838 
1839 /*
1840  * Is the caller allowed to modify his namespace?
1841  */
1842 bool may_mount(void)
1843 {
1844 	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1845 }
1846 
1847 /**
1848  * path_mounted - check whether path is mounted
1849  * @path: path to check
1850  *
1851  * Determine whether @path refers to the root of a mount.
1852  *
1853  * Return: true if @path is the root of a mount, false if not.
1854  */
1855 static inline bool path_mounted(const struct path *path)
1856 {
1857 	return path->mnt->mnt_root == path->dentry;
1858 }
1859 
1860 static void warn_mandlock(void)
1861 {
1862 	pr_warn_once("=======================================================\n"
1863 		     "WARNING: The mand mount option has been deprecated and\n"
1864 		     "         and is ignored by this kernel. Remove the mand\n"
1865 		     "         option from the mount to silence this warning.\n"
1866 		     "=======================================================\n");
1867 }
1868 
1869 static int can_umount(const struct path *path, int flags)
1870 {
1871 	struct mount *mnt = real_mount(path->mnt);
1872 	struct super_block *sb = path->dentry->d_sb;
1873 
1874 	if (!may_mount())
1875 		return -EPERM;
1876 	if (!path_mounted(path))
1877 		return -EINVAL;
1878 	if (!check_mnt(mnt))
1879 		return -EINVAL;
1880 	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1881 		return -EINVAL;
1882 	if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
1883 		return -EPERM;
1884 	return 0;
1885 }
1886 
1887 // caller is responsible for flags being sane
1888 int path_umount(struct path *path, int flags)
1889 {
1890 	struct mount *mnt = real_mount(path->mnt);
1891 	int ret;
1892 
1893 	ret = can_umount(path, flags);
1894 	if (!ret)
1895 		ret = do_umount(mnt, flags);
1896 
1897 	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
1898 	dput(path->dentry);
1899 	mntput_no_expire(mnt);
1900 	return ret;
1901 }
1902 
1903 static int ksys_umount(char __user *name, int flags)
1904 {
1905 	int lookup_flags = LOOKUP_MOUNTPOINT;
1906 	struct path path;
1907 	int ret;
1908 
1909 	// basic validity checks done first
1910 	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1911 		return -EINVAL;
1912 
1913 	if (!(flags & UMOUNT_NOFOLLOW))
1914 		lookup_flags |= LOOKUP_FOLLOW;
1915 	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1916 	if (ret)
1917 		return ret;
1918 	return path_umount(&path, flags);
1919 }
1920 
1921 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1922 {
1923 	return ksys_umount(name, flags);
1924 }
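/*
 * Editorial sketch, not part of the kernel build: minimal userspace use
 * of this syscall via the umount2() wrapper. The target paths are
 * hypothetical.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Lazy unmount: detach now, tear down once no longer in use. */
	if (umount2("/mnt/data", MNT_DETACH))
		perror("umount2 MNT_DETACH");

	/* Refuse to follow a trailing symlink when resolving the target. */
	if (umount2("/mnt/maybe-link", UMOUNT_NOFOLLOW))
		perror("umount2 UMOUNT_NOFOLLOW");
	return 0;
}
#endif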
1925 
1926 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1927 
1928 /*
1929  *	The 2.0 compatible umount. No flags.
1930  */
1931 SYSCALL_DEFINE1(oldumount, char __user *, name)
1932 {
1933 	return ksys_umount(name, 0);
1934 }
1935 
1936 #endif
1937 
1938 static bool is_mnt_ns_file(struct dentry *dentry)
1939 {
1940 	/* Is this a proxy for a mount namespace? */
1941 	return dentry->d_op == &ns_dentry_operations &&
1942 	       dentry->d_fsdata == &mntns_operations;
1943 }
1944 
1945 static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
1946 {
1947 	return container_of(ns, struct mnt_namespace, ns);
1948 }
1949 
1950 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
1951 {
1952 	return &mnt->ns;
1953 }
1954 
1955 static bool mnt_ns_loop(struct dentry *dentry)
1956 {
1957 	/* Could bind mounting the mount namespace inode cause a
1958 	 * mount namespace loop?
1959 	 */
1960 	struct mnt_namespace *mnt_ns;
1961 	if (!is_mnt_ns_file(dentry))
1962 		return false;
1963 
1964 	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
1965 	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
1966 }
1967 
1968 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1969 					int flag)
1970 {
1971 	struct mount *res, *p, *q, *r, *parent;
1972 
1973 	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1974 		return ERR_PTR(-EINVAL);
1975 
1976 	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
1977 		return ERR_PTR(-EINVAL);
1978 
1979 	res = q = clone_mnt(mnt, dentry, flag);
1980 	if (IS_ERR(q))
1981 		return q;
1982 
1983 	q->mnt_mountpoint = mnt->mnt_mountpoint;
1984 
1985 	p = mnt;
1986 	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1987 		struct mount *s;
1988 		if (!is_subdir(r->mnt_mountpoint, dentry))
1989 			continue;
1990 
1991 		for (s = r; s; s = next_mnt(s, r)) {
1992 			if (!(flag & CL_COPY_UNBINDABLE) &&
1993 			    IS_MNT_UNBINDABLE(s)) {
1994 				if (s->mnt.mnt_flags & MNT_LOCKED) {
1995 					/* Both unbindable and locked. */
1996 					q = ERR_PTR(-EPERM);
1997 					goto out;
1998 				} else {
1999 					s = skip_mnt_tree(s);
2000 					continue;
2001 				}
2002 			}
2003 			if (!(flag & CL_COPY_MNT_NS_FILE) &&
2004 			    is_mnt_ns_file(s->mnt.mnt_root)) {
2005 				s = skip_mnt_tree(s);
2006 				continue;
2007 			}
2008 			while (p != s->mnt_parent) {
2009 				p = p->mnt_parent;
2010 				q = q->mnt_parent;
2011 			}
2012 			p = s;
2013 			parent = q;
2014 			q = clone_mnt(p, p->mnt.mnt_root, flag);
2015 			if (IS_ERR(q))
2016 				goto out;
2017 			lock_mount_hash();
2018 			list_add_tail(&q->mnt_list, &res->mnt_list);
2019 			attach_mnt(q, parent, p->mnt_mp, false);
2020 			unlock_mount_hash();
2021 		}
2022 	}
2023 	return res;
2024 out:
2025 	if (res) {
2026 		lock_mount_hash();
2027 		umount_tree(res, UMOUNT_SYNC);
2028 		unlock_mount_hash();
2029 	}
2030 	return q;
2031 }
2032 
2033 /* Caller should check returned pointer for errors */
2034 
2035 struct vfsmount *collect_mounts(const struct path *path)
2036 {
2037 	struct mount *tree;
2038 	namespace_lock();
2039 	if (!check_mnt(real_mount(path->mnt)))
2040 		tree = ERR_PTR(-EINVAL);
2041 	else
2042 		tree = copy_tree(real_mount(path->mnt), path->dentry,
2043 				 CL_COPY_ALL | CL_PRIVATE);
2044 	namespace_unlock();
2045 	if (IS_ERR(tree))
2046 		return ERR_CAST(tree);
2047 	return &tree->mnt;
2048 }
2049 
2050 static void free_mnt_ns(struct mnt_namespace *);
2051 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
2052 
2053 void dissolve_on_fput(struct vfsmount *mnt)
2054 {
2055 	struct mnt_namespace *ns;
2056 	namespace_lock();
2057 	lock_mount_hash();
2058 	ns = real_mount(mnt)->mnt_ns;
2059 	if (ns) {
2060 		if (is_anon_ns(ns))
2061 			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
2062 		else
2063 			ns = NULL;
2064 	}
2065 	unlock_mount_hash();
2066 	namespace_unlock();
2067 	if (ns)
2068 		free_mnt_ns(ns);
2069 }
2070 
2071 void drop_collected_mounts(struct vfsmount *mnt)
2072 {
2073 	namespace_lock();
2074 	lock_mount_hash();
2075 	umount_tree(real_mount(mnt), 0);
2076 	unlock_mount_hash();
2077 	namespace_unlock();
2078 }
2079 
2080 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2081 {
2082 	struct mount *child;
2083 
2084 	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2085 		if (!is_subdir(child->mnt_mountpoint, dentry))
2086 			continue;
2087 
2088 		if (child->mnt.mnt_flags & MNT_LOCKED)
2089 			return true;
2090 	}
2091 	return false;
2092 }
2093 
2094 /**
2095  * clone_private_mount - create a private clone of a path
2096  * @path: path to clone
2097  *
2098  * This creates a new vfsmount, which will be the clone of @path.  The new mount
2099  * will not be attached anywhere in the namespace and will be private (i.e.
2100  * changes to the originating mount won't be propagated into this).
2101  *
2102  * Release with mntput().
2103  */
2104 struct vfsmount *clone_private_mount(const struct path *path)
2105 {
2106 	struct mount *old_mnt = real_mount(path->mnt);
2107 	struct mount *new_mnt;
2108 
2109 	down_read(&namespace_sem);
2110 	if (IS_MNT_UNBINDABLE(old_mnt))
2111 		goto invalid;
2112 
2113 	if (!check_mnt(old_mnt))
2114 		goto invalid;
2115 
2116 	if (has_locked_children(old_mnt, path->dentry))
2117 		goto invalid;
2118 
2119 	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
2120 	up_read(&namespace_sem);
2121 
2122 	if (IS_ERR(new_mnt))
2123 		return ERR_CAST(new_mnt);
2124 
2125 	/* Longterm mount to be removed by kern_unmount*() */
2126 	new_mnt->mnt_ns = MNT_NS_INTERNAL;
2127 
2128 	return &new_mnt->mnt;
2129 
2130 invalid:
2131 	up_read(&namespace_sem);
2132 	return ERR_PTR(-EINVAL);
2133 }
2134 EXPORT_SYMBOL_GPL(clone_private_mount);
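/*
 * Editorial sketch, not part of the kernel build: how an in-kernel user
 * (a stacking filesystem, say) might consume clone_private_mount(). The
 * helper name and the already-resolved @lower path are hypothetical.
 */
#if 0
static struct vfsmount *grab_lower_layer(const struct path *lower)
{
	struct vfsmount *m = clone_private_mount(lower);

	if (IS_ERR(m))
		return m;
	/*
	 * The clone is detached and private; since mnt_ns was set to
	 * MNT_NS_INTERNAL, release it with kern_unmount(), not mntput().
	 */
	return m;
}
#endif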
2135 
2136 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
2137 		   struct vfsmount *root)
2138 {
2139 	struct mount *mnt;
2140 	int res = f(root, arg);
2141 	if (res)
2142 		return res;
2143 	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
2144 		res = f(&mnt->mnt, arg);
2145 		if (res)
2146 			return res;
2147 	}
2148 	return 0;
2149 }
2150 
2151 static void lock_mnt_tree(struct mount *mnt)
2152 {
2153 	struct mount *p;
2154 
2155 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2156 		int flags = p->mnt.mnt_flags;
2157 		/* Don't allow unprivileged users to change mount flags */
2158 		flags |= MNT_LOCK_ATIME;
2159 
2160 		if (flags & MNT_READONLY)
2161 			flags |= MNT_LOCK_READONLY;
2162 
2163 		if (flags & MNT_NODEV)
2164 			flags |= MNT_LOCK_NODEV;
2165 
2166 		if (flags & MNT_NOSUID)
2167 			flags |= MNT_LOCK_NOSUID;
2168 
2169 		if (flags & MNT_NOEXEC)
2170 			flags |= MNT_LOCK_NOEXEC;
2171 		/* Don't allow unprivileged users to reveal what is under a mount */
2172 		if (list_empty(&p->mnt_expire))
2173 			flags |= MNT_LOCKED;
2174 		p->mnt.mnt_flags = flags;
2175 	}
2176 }
2177 
2178 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2179 {
2180 	struct mount *p;
2181 
2182 	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2183 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
2184 			mnt_release_group_id(p);
2185 	}
2186 }
2187 
2188 static int invent_group_ids(struct mount *mnt, bool recurse)
2189 {
2190 	struct mount *p;
2191 
2192 	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2193 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
2194 			int err = mnt_alloc_group_id(p);
2195 			if (err) {
2196 				cleanup_group_ids(mnt, p);
2197 				return err;
2198 			}
2199 		}
2200 	}
2201 
2202 	return 0;
2203 }
2204 
2205 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2206 {
2207 	unsigned int max = READ_ONCE(sysctl_mount_max);
2208 	unsigned int mounts = 0;
2209 	struct mount *p;
2210 
2211 	if (ns->mounts >= max)
2212 		return -ENOSPC;
2213 	max -= ns->mounts;
2214 	if (ns->pending_mounts >= max)
2215 		return -ENOSPC;
2216 	max -= ns->pending_mounts;
2217 
2218 	for (p = mnt; p; p = next_mnt(p, mnt))
2219 		mounts++;
2220 
2221 	if (mounts > max)
2222 		return -ENOSPC;
2223 
2224 	ns->pending_mounts += mounts;
2225 	return 0;
2226 }
2227 
2228 enum mnt_tree_flags_t {
2229 	MNT_TREE_MOVE = BIT(0),
2230 	MNT_TREE_BENEATH = BIT(1),
2231 };
2232 
2233 /**
2234  * attach_recursive_mnt - attach a source mount tree
2235  * @source_mnt: mount tree to be attached
2236  * @top_mnt:    mount that @source_mnt will be mounted on or mounted beneath
2237  * @dest_mp:    the mountpoint @source_mnt will be mounted at
2238  * @flags:      modify how @source_mnt is supposed to be attached
2239  *
2240  *  NOTE: the table below explains the semantics when a source mount
2241  *  of a given type is attached to a destination mount of a given type.
2242  * ---------------------------------------------------------------------------
2243  * |         BIND MOUNT OPERATION                                            |
2244  * |**************************************************************************
2245  * | source-->| shared        |       private  |       slave    | unbindable |
2246  * | dest     |               |                |                |            |
2247  * |   |      |               |                |                |            |
2248  * |   v      |               |                |                |            |
2249  * |**************************************************************************
2250  * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
2251  * |          |               |                |                |            |
2252  * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
2253  * ***************************************************************************
2254  * A bind operation clones the source mount and mounts the clone on the
2255  * destination mount.
2256  *
2257  * (++)  the cloned mount is propagated to all the mounts in the propagation
2258  * 	 tree of the destination mount and the cloned mount is added to
2259  * 	 the peer group of the source mount.
2260  * (+)   the cloned mount is created under the destination mount and is marked
2261  *       as shared. The cloned mount is added to the peer group of the source
2262  *       mount.
2263  * (+++) the mount is propagated to all the mounts in the propagation tree
2264  *       of the destination mount and the cloned mount is made slave
2265  *       of the same master as that of the source mount. The cloned mount
2266  *       is marked as 'shared and slave'.
2267  * (*)   the cloned mount is made a slave of the same master as that of the
2268  * 	 source mount.
2269  *
2270  * ---------------------------------------------------------------------------
2271  * |         		MOVE MOUNT OPERATION                                 |
2272  * |**************************************************************************
2273  * | source-->| shared        |       private  |       slave    | unbindable |
2274  * | dest     |               |                |                |            |
2275  * |   |      |               |                |                |            |
2276  * |   v      |               |                |                |            |
2277  * |**************************************************************************
2278  * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
2279  * |          |               |                |                |            |
2280  * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
2281  * ***************************************************************************
2282  *
2283  * (+)  the mount is moved to the destination. And is then propagated to
2284  * 	all the mounts in the propagation tree of the destination mount.
2285  * (+*)  the mount is moved to the destination.
2286  * (+++)  the mount is moved to the destination and is then propagated to
2287  * 	all the mounts belonging to the destination mount's propagation tree.
2288  * 	the mount is marked as 'shared and slave'.
2289  * (*)	the mount continues to be a slave at the new location.
2290  *
2291  * If the source mount is a tree, the operations explained above are
2292  * applied to each mount in the tree.
2293  * Must be called without spinlocks held, since this function can sleep
2294  * in allocations.
2295  *
2296  * Context: The function expects namespace_lock() to be held.
2297  * Return: If @source_mnt was successfully attached 0 is returned.
2298  *         Otherwise a negative error code is returned.
2299  */
2300 static int attach_recursive_mnt(struct mount *source_mnt,
2301 				struct mount *top_mnt,
2302 				struct mountpoint *dest_mp,
2303 				enum mnt_tree_flags_t flags)
2304 {
2305 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2306 	HLIST_HEAD(tree_list);
2307 	struct mnt_namespace *ns = top_mnt->mnt_ns;
2308 	struct mountpoint *smp;
2309 	struct mount *child, *dest_mnt, *p;
2310 	struct hlist_node *n;
2311 	int err = 0;
2312 	bool moving = flags & MNT_TREE_MOVE, beneath = flags & MNT_TREE_BENEATH;
2313 
2314 	/*
2315 	 * Preallocate a mountpoint in case the new mounts need to be
2316 	 * mounted beneath mounts on the same mountpoint.
2317 	 */
2318 	smp = get_mountpoint(source_mnt->mnt.mnt_root);
2319 	if (IS_ERR(smp))
2320 		return PTR_ERR(smp);
2321 
2322 	/* Is there space to add these mounts to the mount namespace? */
2323 	if (!moving) {
2324 		err = count_mounts(ns, source_mnt);
2325 		if (err)
2326 			goto out;
2327 	}
2328 
2329 	if (beneath)
2330 		dest_mnt = top_mnt->mnt_parent;
2331 	else
2332 		dest_mnt = top_mnt;
2333 
2334 	if (IS_MNT_SHARED(dest_mnt)) {
2335 		err = invent_group_ids(source_mnt, true);
2336 		if (err)
2337 			goto out;
2338 		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
2339 	}
2340 	lock_mount_hash();
2341 	if (err)
2342 		goto out_cleanup_ids;
2343 
2344 	if (IS_MNT_SHARED(dest_mnt)) {
2345 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2346 			set_mnt_shared(p);
2347 	}
2348 
2349 	if (moving) {
2350 		if (beneath)
2351 			dest_mp = smp;
2352 		unhash_mnt(source_mnt);
2353 		attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
2354 		touch_mnt_namespace(source_mnt->mnt_ns);
2355 	} else {
2356 		if (source_mnt->mnt_ns) {
2357 			/* move from anon - the caller will destroy */
2358 			list_del_init(&source_mnt->mnt_ns->list);
2359 		}
2360 		if (beneath)
2361 			mnt_set_mountpoint_beneath(source_mnt, top_mnt, smp);
2362 		else
2363 			mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2364 		commit_tree(source_mnt);
2365 	}
2366 
2367 	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2368 		struct mount *q;
2369 		hlist_del_init(&child->mnt_hash);
2370 		q = __lookup_mnt(&child->mnt_parent->mnt,
2371 				 child->mnt_mountpoint);
2372 		if (q)
2373 			mnt_change_mountpoint(child, smp, q);
2374 		/* Notice when we are propagating across user namespaces */
2375 		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
2376 			lock_mnt_tree(child);
2377 		child->mnt.mnt_flags &= ~MNT_LOCKED;
2378 		commit_tree(child);
2379 	}
2380 	put_mountpoint(smp);
2381 	unlock_mount_hash();
2382 
2383 	return 0;
2384 
2385  out_cleanup_ids:
2386 	while (!hlist_empty(&tree_list)) {
2387 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2388 		child->mnt_parent->mnt_ns->pending_mounts = 0;
2389 		umount_tree(child, UMOUNT_SYNC);
2390 	}
2391 	unlock_mount_hash();
2392 	cleanup_group_ids(source_mnt, NULL);
2393  out:
2394 	ns->pending_mounts = 0;
2395 
2396 	read_seqlock_excl(&mount_lock);
2397 	put_mountpoint(smp);
2398 	read_sequnlock_excl(&mount_lock);
2399 
2400 	return err;
2401 }
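/*
 * Editorial sketch, not part of the kernel build: userspace steps that
 * exercise the bind table above, assuming /mnt/dest is already a mount
 * point; both paths are hypothetical.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Destination row "shared": mark the destination mount shared. */
	if (mount(NULL, "/mnt/dest", NULL, MS_SHARED, NULL))
		perror("make-shared");
	/*
	 * Bind onto it: attach_recursive_mnt() replicates the new mount
	 * to every member of /mnt/dest's propagation tree.
	 */
	if (mount("/mnt/src", "/mnt/dest", NULL, MS_BIND, NULL))
		perror("bind");
	return 0;
}
#endif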
2402 
2403 /**
2404  * do_lock_mount - lock mount and mountpoint
2405  * @path:    target path
2406  * @beneath: whether the intention is to mount beneath @path
2407  *
2408  * Follow the mount stack on @path until the top mount @mnt is found. If
2409  * the initial @path->{mnt,dentry} is a mountpoint, look up the first
2410  * mount stacked on top of it. Then simply follow @{mnt,mnt->mnt_root}
2411  * until nothing is stacked on top of it anymore.
2412  *
2413  * Acquire the inode_lock() on the top mount's ->mnt_root to protect
2414  * against concurrent removal of the new mountpoint from another mount
2415  * namespace.
2416  *
2417  * If @beneath is requested, the inode_lock() on @mnt's mountpoint
2418  * @mp on @mnt->mnt_parent must be acquired. This protects against a
2419  * concurrent unlink of @mp->m_dentry from another mount namespace
2420  * where @mnt doesn't have a child mount mounted on @mp. A concurrent
2421  * removal of @mnt->mnt_root doesn't matter as nothing will be mounted
2422  * on top of it for @beneath.
2423  *
2424  * In addition, @beneath needs to make sure that @mnt hasn't been
2425  * unmounted or moved from its current mountpoint in between dropping
2426  * @mount_lock and acquiring @namespace_sem. For the !@beneath case @mnt
2427  * being unmounted would be detected later by e.g., calling
2428  * check_mnt(mnt) in the function it's called from. For the @beneath
2429  * case however, it's useful to detect it directly in do_lock_mount().
2430  * If @mnt hasn't been unmounted then @mnt->mnt_mountpoint still points
2431  * to @mnt->mnt_mp->m_dentry. But if @mnt has been unmounted it will
2432  * point to @mnt->mnt_root and @mnt->mnt_mp will be NULL.
2433  *
2434  * Return: Either the target mountpoint on the top mount or the top
2435  *         mount's mountpoint.
2436  */
2437 static struct mountpoint *do_lock_mount(struct path *path, bool beneath)
2438 {
2439 	struct vfsmount *mnt = path->mnt;
2440 	struct dentry *dentry;
2441 	struct mountpoint *mp = ERR_PTR(-ENOENT);
2442 
2443 	for (;;) {
2444 		struct mount *m;
2445 
2446 		if (beneath) {
2447 			m = real_mount(mnt);
2448 			read_seqlock_excl(&mount_lock);
2449 			dentry = dget(m->mnt_mountpoint);
2450 			read_sequnlock_excl(&mount_lock);
2451 		} else {
2452 			dentry = path->dentry;
2453 		}
2454 
2455 		inode_lock(dentry->d_inode);
2456 		if (unlikely(cant_mount(dentry))) {
2457 			inode_unlock(dentry->d_inode);
2458 			goto out;
2459 		}
2460 
2461 		namespace_lock();
2462 
2463 		if (beneath && (!is_mounted(mnt) || m->mnt_mountpoint != dentry)) {
2464 			namespace_unlock();
2465 			inode_unlock(dentry->d_inode);
2466 			goto out;
2467 		}
2468 
2469 		mnt = lookup_mnt(path);
2470 		if (likely(!mnt))
2471 			break;
2472 
2473 		namespace_unlock();
2474 		inode_unlock(dentry->d_inode);
2475 		if (beneath)
2476 			dput(dentry);
2477 		path_put(path);
2478 		path->mnt = mnt;
2479 		path->dentry = dget(mnt->mnt_root);
2480 	}
2481 
2482 	mp = get_mountpoint(dentry);
2483 	if (IS_ERR(mp)) {
2484 		namespace_unlock();
2485 		inode_unlock(dentry->d_inode);
2486 	}
2487 
2488 out:
2489 	if (beneath)
2490 		dput(dentry);
2491 
2492 	return mp;
2493 }
2494 
2495 static inline struct mountpoint *lock_mount(struct path *path)
2496 {
2497 	return do_lock_mount(path, false);
2498 }
2499 
2500 static void unlock_mount(struct mountpoint *where)
2501 {
2502 	struct dentry *dentry = where->m_dentry;
2503 
2504 	read_seqlock_excl(&mount_lock);
2505 	put_mountpoint(where);
2506 	read_sequnlock_excl(&mount_lock);
2507 
2508 	namespace_unlock();
2509 	inode_unlock(dentry->d_inode);
2510 }
2511 
2512 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2513 {
2514 	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2515 		return -EINVAL;
2516 
2517 	if (d_is_dir(mp->m_dentry) !=
2518 	      d_is_dir(mnt->mnt.mnt_root))
2519 		return -ENOTDIR;
2520 
2521 	return attach_recursive_mnt(mnt, p, mp, 0);
2522 }
2523 
2524 /*
2525  * Sanity check the flags to change_mnt_propagation.
2526  */
2527 
2528 static int flags_to_propagation_type(int ms_flags)
2529 {
2530 	int type = ms_flags & ~(MS_REC | MS_SILENT);
2531 
2532 	/* Fail if any non-propagation flags are set */
2533 	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2534 		return 0;
2535 	/* Only one propagation flag should be set */
2536 	if (!is_power_of_2(type))
2537 		return 0;
2538 	return type;
2539 }
2540 
2541 /*
2542  * recursively change the type of the mountpoint.
2543  */
2544 static int do_change_type(struct path *path, int ms_flags)
2545 {
2546 	struct mount *m;
2547 	struct mount *mnt = real_mount(path->mnt);
2548 	int recurse = ms_flags & MS_REC;
2549 	int type;
2550 	int err = 0;
2551 
2552 	if (!path_mounted(path))
2553 		return -EINVAL;
2554 
2555 	type = flags_to_propagation_type(ms_flags);
2556 	if (!type)
2557 		return -EINVAL;
2558 
2559 	namespace_lock();
2560 	if (type == MS_SHARED) {
2561 		err = invent_group_ids(mnt, recurse);
2562 		if (err)
2563 			goto out_unlock;
2564 	}
2565 
2566 	lock_mount_hash();
2567 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2568 		change_mnt_propagation(m, type);
2569 	unlock_mount_hash();
2570 
2571  out_unlock:
2572 	namespace_unlock();
2573 	return err;
2574 }
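/*
 * Editorial sketch, not part of the kernel build: the userspace
 * equivalent of "mount --make-rshared /", which reaches
 * do_change_type() with MS_SHARED | MS_REC.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL))
		perror("make-rshared");
	return 0;
}
#endif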
2575 
2576 static struct mount *__do_loopback(struct path *old_path, int recurse)
2577 {
2578 	struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2579 
2580 	if (IS_MNT_UNBINDABLE(old))
2581 		return mnt;
2582 
2583 	if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2584 		return mnt;
2585 
2586 	if (!recurse && has_locked_children(old, old_path->dentry))
2587 		return mnt;
2588 
2589 	if (recurse)
2590 		mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2591 	else
2592 		mnt = clone_mnt(old, old_path->dentry, 0);
2593 
2594 	if (!IS_ERR(mnt))
2595 		mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2596 
2597 	return mnt;
2598 }
2599 
2600 /*
2601  * do loopback mount.
2602  */
2603 static int do_loopback(struct path *path, const char *old_name,
2604 				int recurse)
2605 {
2606 	struct path old_path;
2607 	struct mount *mnt = NULL, *parent;
2608 	struct mountpoint *mp;
2609 	int err;
2610 	if (!old_name || !*old_name)
2611 		return -EINVAL;
2612 	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2613 	if (err)
2614 		return err;
2615 
2616 	err = -EINVAL;
2617 	if (mnt_ns_loop(old_path.dentry))
2618 		goto out;
2619 
2620 	mp = lock_mount(path);
2621 	if (IS_ERR(mp)) {
2622 		err = PTR_ERR(mp);
2623 		goto out;
2624 	}
2625 
2626 	parent = real_mount(path->mnt);
2627 	if (!check_mnt(parent))
2628 		goto out2;
2629 
2630 	mnt = __do_loopback(&old_path, recurse);
2631 	if (IS_ERR(mnt)) {
2632 		err = PTR_ERR(mnt);
2633 		goto out2;
2634 	}
2635 
2636 	err = graft_tree(mnt, parent, mp);
2637 	if (err) {
2638 		lock_mount_hash();
2639 		umount_tree(mnt, UMOUNT_SYNC);
2640 		unlock_mount_hash();
2641 	}
2642 out2:
2643 	unlock_mount(mp);
2644 out:
2645 	path_put(&old_path);
2646 	return err;
2647 }
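/*
 * Editorial sketch, not part of the kernel build: the classic recursive
 * bind ("mount --rbind /src /dst") that ends up in do_loopback(). Both
 * paths are hypothetical.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* MS_REC makes __do_loopback() copy the whole tree under /src. */
	if (mount("/src", "/dst", NULL, MS_BIND | MS_REC, NULL))
		perror("rbind");
	return 0;
}
#endif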
2648 
2649 static struct file *open_detached_copy(struct path *path, bool recursive)
2650 {
2651 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2652 	struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2653 	struct mount *mnt, *p;
2654 	struct file *file;
2655 
2656 	if (IS_ERR(ns))
2657 		return ERR_CAST(ns);
2658 
2659 	namespace_lock();
2660 	mnt = __do_loopback(path, recursive);
2661 	if (IS_ERR(mnt)) {
2662 		namespace_unlock();
2663 		free_mnt_ns(ns);
2664 		return ERR_CAST(mnt);
2665 	}
2666 
2667 	lock_mount_hash();
2668 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2669 		p->mnt_ns = ns;
2670 		ns->mounts++;
2671 	}
2672 	ns->root = mnt;
2673 	list_add_tail(&ns->list, &mnt->mnt_list);
2674 	mntget(&mnt->mnt);
2675 	unlock_mount_hash();
2676 	namespace_unlock();
2677 
2678 	mntput(path->mnt);
2679 	path->mnt = &mnt->mnt;
2680 	file = dentry_open(path, O_PATH, current_cred());
2681 	if (IS_ERR(file))
2682 		dissolve_on_fput(path->mnt);
2683 	else
2684 		file->f_mode |= FMODE_NEED_UNMOUNT;
2685 	return file;
2686 }
2687 
2688 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2689 {
2690 	struct file *file;
2691 	struct path path;
2692 	int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2693 	bool detached = flags & OPEN_TREE_CLONE;
2694 	int error;
2695 	int fd;
2696 
2697 	BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2698 
2699 	if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2700 		      AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2701 		      OPEN_TREE_CLOEXEC))
2702 		return -EINVAL;
2703 
2704 	if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2705 		return -EINVAL;
2706 
2707 	if (flags & AT_NO_AUTOMOUNT)
2708 		lookup_flags &= ~LOOKUP_AUTOMOUNT;
2709 	if (flags & AT_SYMLINK_NOFOLLOW)
2710 		lookup_flags &= ~LOOKUP_FOLLOW;
2711 	if (flags & AT_EMPTY_PATH)
2712 		lookup_flags |= LOOKUP_EMPTY;
2713 
2714 	if (detached && !may_mount())
2715 		return -EPERM;
2716 
2717 	fd = get_unused_fd_flags(flags & O_CLOEXEC);
2718 	if (fd < 0)
2719 		return fd;
2720 
2721 	error = user_path_at(dfd, filename, lookup_flags, &path);
2722 	if (unlikely(error)) {
2723 		file = ERR_PTR(error);
2724 	} else {
2725 		if (detached)
2726 			file = open_detached_copy(&path, flags & AT_RECURSIVE);
2727 		else
2728 			file = dentry_open(&path, O_PATH, current_cred());
2729 		path_put(&path);
2730 	}
2731 	if (IS_ERR(file)) {
2732 		put_unused_fd(fd);
2733 		return PTR_ERR(file);
2734 	}
2735 	fd_install(fd, file);
2736 	return fd;
2737 }
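/*
 * Editorial sketch, not part of the kernel build: detach a recursive
 * copy of a subtree with open_tree() and attach it elsewhere with
 * move_mount(). glibc ships no wrappers, so raw syscalls are used;
 * the paths are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

#ifndef AT_RECURSIVE
#define AT_RECURSIVE 0x8000	/* from <linux/fcntl.h> on older glibc */
#endif

int main(void)
{
	int fd = syscall(SYS_open_tree, AT_FDCWD, "/src",
			 OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);

	if (fd < 0) {
		perror("open_tree");
		return 1;
	}
	/* Attach the detached copy at /dst. */
	if (syscall(SYS_move_mount, fd, "", AT_FDCWD, "/dst",
		    MOVE_MOUNT_F_EMPTY_PATH)) {
		perror("move_mount");
		return 1;
	}
	close(fd);
	return 0;
}
#endif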
2738 
2739 /*
2740  * Don't allow locked mount flags to be cleared.
2741  *
2742  * No locks need to be held here while testing the various MNT_LOCK
2743  * flags because those flags can never be cleared once they are set.
2744  */
2745 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2746 {
2747 	unsigned int fl = mnt->mnt.mnt_flags;
2748 
2749 	if ((fl & MNT_LOCK_READONLY) &&
2750 	    !(mnt_flags & MNT_READONLY))
2751 		return false;
2752 
2753 	if ((fl & MNT_LOCK_NODEV) &&
2754 	    !(mnt_flags & MNT_NODEV))
2755 		return false;
2756 
2757 	if ((fl & MNT_LOCK_NOSUID) &&
2758 	    !(mnt_flags & MNT_NOSUID))
2759 		return false;
2760 
2761 	if ((fl & MNT_LOCK_NOEXEC) &&
2762 	    !(mnt_flags & MNT_NOEXEC))
2763 		return false;
2764 
2765 	if ((fl & MNT_LOCK_ATIME) &&
2766 	    ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2767 		return false;
2768 
2769 	return true;
2770 }
2771 
2772 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2773 {
2774 	bool readonly_request = (mnt_flags & MNT_READONLY);
2775 
2776 	if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2777 		return 0;
2778 
2779 	if (readonly_request)
2780 		return mnt_make_readonly(mnt);
2781 
2782 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
2783 	return 0;
2784 }
2785 
2786 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2787 {
2788 	mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2789 	mnt->mnt.mnt_flags = mnt_flags;
2790 	touch_mnt_namespace(mnt->mnt_ns);
2791 }
2792 
2793 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2794 {
2795 	struct super_block *sb = mnt->mnt_sb;
2796 
2797 	if (!__mnt_is_readonly(mnt) &&
2798 	   (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
2799 	   (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2800 		char *buf, *mntpath;
2801 
2802 		buf = (char *)__get_free_page(GFP_KERNEL);
2803 		if (buf)
2804 			mntpath = d_path(mountpoint, buf, PAGE_SIZE);
2805 		else
2806 			mntpath = ERR_PTR(-ENOMEM);
2807 		if (IS_ERR(mntpath))
2808 			mntpath = "(unknown)";
2809 
2810 		pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
2811 			sb->s_type->name,
2812 			is_mounted(mnt) ? "remounted" : "mounted",
2813 			mntpath, &sb->s_time_max,
2814 			(unsigned long long)sb->s_time_max);
2815 
2816 		sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
2817 		if (buf)
2818 			free_page((unsigned long)buf);
2819 	}
2820 }
2821 
2822 /*
2823  * Handle reconfiguration of the mountpoint only without alteration of the
2824  * superblock it refers to.  This is triggered by specifying MS_REMOUNT|MS_BIND
2825  * to mount(2).
2826  */
2827 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2828 {
2829 	struct super_block *sb = path->mnt->mnt_sb;
2830 	struct mount *mnt = real_mount(path->mnt);
2831 	int ret;
2832 
2833 	if (!check_mnt(mnt))
2834 		return -EINVAL;
2835 
2836 	if (!path_mounted(path))
2837 		return -EINVAL;
2838 
2839 	if (!can_change_locked_flags(mnt, mnt_flags))
2840 		return -EPERM;
2841 
2842 	/*
2843 	 * We're only checking whether the superblock is read-only, not
2844 	 * changing it, so only take down_read(&sb->s_umount).
2845 	 */
2846 	down_read(&sb->s_umount);
2847 	lock_mount_hash();
2848 	ret = change_mount_ro_state(mnt, mnt_flags);
2849 	if (ret == 0)
2850 		set_mount_attributes(mnt, mnt_flags);
2851 	unlock_mount_hash();
2852 	up_read(&sb->s_umount);
2853 
2854 	mnt_warn_timestamp_expiry(path, &mnt->mnt);
2855 
2856 	return ret;
2857 }
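/*
 * Editorial sketch, not part of the kernel build: flip a single
 * mountpoint read-only without touching the superblock, i.e. the
 * MS_REMOUNT | MS_BIND path handled above. The path is hypothetical.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount(NULL, "/mnt/ro-view", NULL,
		  MS_REMOUNT | MS_BIND | MS_RDONLY, NULL))
		perror("remount-bind");
	return 0;
}
#endif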
2858 
2859 /*
2860  * change filesystem flags. dir should be a physical root of filesystem.
2861  * If you've mounted a non-root directory somewhere and want to do remount
2862  * on it - tough luck.
2863  */
2864 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2865 		      int mnt_flags, void *data)
2866 {
2867 	int err;
2868 	struct super_block *sb = path->mnt->mnt_sb;
2869 	struct mount *mnt = real_mount(path->mnt);
2870 	struct fs_context *fc;
2871 
2872 	if (!check_mnt(mnt))
2873 		return -EINVAL;
2874 
2875 	if (!path_mounted(path))
2876 		return -EINVAL;
2877 
2878 	if (!can_change_locked_flags(mnt, mnt_flags))
2879 		return -EPERM;
2880 
2881 	fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2882 	if (IS_ERR(fc))
2883 		return PTR_ERR(fc);
2884 
2885 	/*
2886 	 * Indicate to the filesystem that the remount request is coming
2887 	 * from the legacy mount system call.
2888 	 */
2889 	fc->oldapi = true;
2890 
2891 	err = parse_monolithic_mount_data(fc, data);
2892 	if (!err) {
2893 		down_write(&sb->s_umount);
2894 		err = -EPERM;
2895 		if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2896 			err = reconfigure_super(fc);
2897 			if (!err) {
2898 				lock_mount_hash();
2899 				set_mount_attributes(mnt, mnt_flags);
2900 				unlock_mount_hash();
2901 			}
2902 		}
2903 		up_write(&sb->s_umount);
2904 	}
2905 
2906 	mnt_warn_timestamp_expiry(path, &mnt->mnt);
2907 
2908 	put_fs_context(fc);
2909 	return err;
2910 }
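/*
 * Editorial sketch, not part of the kernel build: a plain MS_REMOUNT,
 * by contrast, reconfigures the superblock itself - the userspace
 * equivalent of "mount -o remount,ro /".
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL))
		perror("remount");
	return 0;
}
#endif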
2911 
2912 static inline int tree_contains_unbindable(struct mount *mnt)
2913 {
2914 	struct mount *p;
2915 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2916 		if (IS_MNT_UNBINDABLE(p))
2917 			return 1;
2918 	}
2919 	return 0;
2920 }
2921 
2922 /*
2923  * Check that there aren't references to earlier/same mount namespaces in the
2924  * specified subtree.  Such references can act as pins for mount namespaces
2925  * that aren't checked by the mount-cycle checking code, thereby allowing
2926  * cycles to be made.
2927  */
2928 static bool check_for_nsfs_mounts(struct mount *subtree)
2929 {
2930 	struct mount *p;
2931 	bool ret = false;
2932 
2933 	lock_mount_hash();
2934 	for (p = subtree; p; p = next_mnt(p, subtree))
2935 		if (mnt_ns_loop(p->mnt.mnt_root))
2936 			goto out;
2937 
2938 	ret = true;
2939 out:
2940 	unlock_mount_hash();
2941 	return ret;
2942 }
2943 
2944 static int do_set_group(struct path *from_path, struct path *to_path)
2945 {
2946 	struct mount *from, *to;
2947 	int err;
2948 
2949 	from = real_mount(from_path->mnt);
2950 	to = real_mount(to_path->mnt);
2951 
2952 	namespace_lock();
2953 
2954 	err = -EINVAL;
2955 	/* To and From must be mounted */
2956 	if (!is_mounted(&from->mnt))
2957 		goto out;
2958 	if (!is_mounted(&to->mnt))
2959 		goto out;
2960 
2961 	err = -EPERM;
2962 	/* We should be allowed to modify mount namespaces of both mounts */
2963 	if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
2964 		goto out;
2965 	if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
2966 		goto out;
2967 
2968 	err = -EINVAL;
2969 	/* To and From paths should be mount roots */
2970 	if (!path_mounted(from_path))
2971 		goto out;
2972 	if (!path_mounted(to_path))
2973 		goto out;
2974 
2975 	/* Setting sharing groups is only allowed across same superblock */
2976 	if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2977 		goto out;
2978 
2979 	/* From mount root should be wider than To mount root */
2980 	if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2981 		goto out;
2982 
2983 	/* From mount should not have locked children in place of To's root */
2984 	if (has_locked_children(from, to->mnt.mnt_root))
2985 		goto out;
2986 
2987 	/* Setting sharing groups is only allowed on private mounts */
2988 	if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
2989 		goto out;
2990 
2991 	/* From should not be private */
2992 	if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
2993 		goto out;
2994 
2995 	if (IS_MNT_SLAVE(from)) {
2996 		struct mount *m = from->mnt_master;
2997 
2998 		list_add(&to->mnt_slave, &m->mnt_slave_list);
2999 		to->mnt_master = m;
3000 	}
3001 
3002 	if (IS_MNT_SHARED(from)) {
3003 		to->mnt_group_id = from->mnt_group_id;
3004 		list_add(&to->mnt_share, &from->mnt_share);
3005 		lock_mount_hash();
3006 		set_mnt_shared(to);
3007 		unlock_mount_hash();
3008 	}
3009 
3010 	err = 0;
3011 out:
3012 	namespace_unlock();
3013 	return err;
3014 }
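/*
 * Editorial sketch, not part of the kernel build: do_set_group() is
 * reached from move_mount(2) with MOVE_MOUNT_SET_GROUP, copying the
 * propagation state of "/from" onto "/to". Raw syscall, hypothetical
 * paths.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

int main(void)
{
	if (syscall(SYS_move_mount, AT_FDCWD, "/from", AT_FDCWD, "/to",
		    MOVE_MOUNT_SET_GROUP)) {
		perror("move_mount");
		return 1;
	}
	return 0;
}
#endif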
3015 
3016 /**
3017  * path_overmounted - check if path is overmounted
3018  * @path: path to check
3019  *
3020  * Check if path is overmounted, i.e., if there's a mount on top of
3021  * @path->mnt with @path->dentry as mountpoint.
3022  *
3023  * Context: This function expects namespace_lock() to be held.
3024  * Return: If path is overmounted true is returned, false if not.
3025  */
3026 static inline bool path_overmounted(const struct path *path)
3027 {
3028 	rcu_read_lock();
3029 	if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
3030 		rcu_read_unlock();
3031 		return true;
3032 	}
3033 	rcu_read_unlock();
3034 	return false;
3035 }
3036 
3037 /**
3038  * can_move_mount_beneath - check that we can mount beneath the top mount
3039  * @from: mount to mount beneath
3040  * @to:   mount under which to mount
3041  *
3042  * - Make sure that @to->dentry is actually the root of a mount under
3043  *   which we can mount another mount.
3044  * - Make sure that nothing can be mounted beneath the caller's current
3045  *   root or the rootfs of the namespace.
3046  * - Make sure that the caller can unmount the topmost mount ensuring
3047  *   that the caller could reveal the underlying mountpoint.
3048  * - Ensure that nothing has been mounted on top of @from before we
3049  *   grabbed @namespace_sem to avoid creating pointless shadow mounts.
3050  * - Prevent mounting beneath a mount if the propagation relationship
3051  *   between the source mount, parent mount, and top mount would lead to
3052  *   nonsensical mount trees.
3053  *
3054  * Context: This function expects namespace_lock() to be held.
3055  * Return: On success 0, and on error a negative error code is returned.
3056  */
3057 static int can_move_mount_beneath(const struct path *from,
3058 				  const struct path *to,
3059 				  const struct mountpoint *mp)
3060 {
3061 	struct mount *mnt_from = real_mount(from->mnt),
3062 		     *mnt_to = real_mount(to->mnt),
3063 		     *parent_mnt_to = mnt_to->mnt_parent;
3064 
3065 	if (!mnt_has_parent(mnt_to))
3066 		return -EINVAL;
3067 
3068 	if (!path_mounted(to))
3069 		return -EINVAL;
3070 
3071 	if (IS_MNT_LOCKED(mnt_to))
3072 		return -EINVAL;
3073 
3074 	/* Avoid creating shadow mounts during mount propagation. */
3075 	if (path_overmounted(from))
3076 		return -EINVAL;
3077 
3078 	/*
3079 	 * Mounting beneath the rootfs only makes sense when the
3080 	 * semantics of pivot_root(".", ".") are used.
3081 	 */
3082 	if (&mnt_to->mnt == current->fs->root.mnt)
3083 		return -EINVAL;
3084 	if (parent_mnt_to == current->nsproxy->mnt_ns->root)
3085 		return -EINVAL;
3086 
3087 	for (struct mount *p = mnt_from; mnt_has_parent(p); p = p->mnt_parent)
3088 		if (p == mnt_to)
3089 			return -EINVAL;
3090 
3091 	/*
3092 	 * If the parent mount propagates to the child mount this would
3093 	 * mean mounting @mnt_from on @mnt_to->mnt_parent and then
3094 	 * propagating a copy @c of @mnt_from on top of @mnt_to. This
3095 	 * defeats the whole purpose of mounting beneath another mount.
3096 	 */
3097 	if (propagation_would_overmount(parent_mnt_to, mnt_to, mp))
3098 		return -EINVAL;
3099 
3100 	/*
3101 	 * If @mnt_to->mnt_parent propagates to @mnt_from this would
3102 	 * mean propagating a copy @c of @mnt_from on top of @mnt_from.
3103 	 * Afterwards @mnt_from would be mounted on top of
3104 	 * @mnt_to->mnt_parent and @mnt_to would be unmounted from
3105 	 * @mnt->mnt_parent and remounted on @mnt_from. But since @c is
3106 	 * already mounted on @mnt_from, @mnt_to would ultimately be
3107 	 * remounted on top of @c. Afterwards, @mnt_from would be
3108 	 * covered by a copy @c of @mnt_from and @c would be covered by
3109 	 * @mnt_from itself. This defeats the whole purpose of mounting
3110 	 * @mnt_from beneath @mnt_to.
3111 	 */
3112 	if (propagation_would_overmount(parent_mnt_to, mnt_from, mp))
3113 		return -EINVAL;
3114 
3115 	return 0;
3116 }
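/*
 * Editorial sketch, not part of the kernel build: the caller these
 * checks serve is move_mount(2) with MOVE_MOUNT_BENEATH, which slips a
 * mount underneath the topmost mount on the target. Hypothetical paths.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

int main(void)
{
	if (syscall(SYS_move_mount, AT_FDCWD, "/new", AT_FDCWD, "/target",
		    MOVE_MOUNT_BENEATH)) {
		perror("move_mount");
		return 1;
	}
	return 0;
}
#endif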
3117 
3118 static int do_move_mount(struct path *old_path, struct path *new_path,
3119 			 bool beneath)
3120 {
3121 	struct mnt_namespace *ns;
3122 	struct mount *p;
3123 	struct mount *old;
3124 	struct mount *parent;
3125 	struct mountpoint *mp, *old_mp;
3126 	int err;
3127 	bool attached;
3128 	enum mnt_tree_flags_t flags = 0;
3129 
3130 	mp = do_lock_mount(new_path, beneath);
3131 	if (IS_ERR(mp))
3132 		return PTR_ERR(mp);
3133 
3134 	old = real_mount(old_path->mnt);
3135 	p = real_mount(new_path->mnt);
3136 	parent = old->mnt_parent;
3137 	attached = mnt_has_parent(old);
3138 	if (attached)
3139 		flags |= MNT_TREE_MOVE;
3140 	old_mp = old->mnt_mp;
3141 	ns = old->mnt_ns;
3142 
3143 	err = -EINVAL;
3144 	/* The mountpoint must be in our namespace. */
3145 	if (!check_mnt(p))
3146 		goto out;
3147 
3148 	/* The thing moved must be mounted... */
3149 	if (!is_mounted(&old->mnt))
3150 		goto out;
3151 
3152 	/* ... and either ours or the root of anon namespace */
3153 	if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
3154 		goto out;
3155 
3156 	if (old->mnt.mnt_flags & MNT_LOCKED)
3157 		goto out;
3158 
3159 	if (!path_mounted(old_path))
3160 		goto out;
3161 
3162 	if (d_is_dir(new_path->dentry) !=
3163 	    d_is_dir(old_path->dentry))
3164 		goto out;
3165 	/*
3166 	 * Don't move a mount residing in a shared parent.
3167 	 */
3168 	if (attached && IS_MNT_SHARED(parent))
3169 		goto out;
3170 
3171 	if (beneath) {
3172 		err = can_move_mount_beneath(old_path, new_path, mp);
3173 		if (err)
3174 			goto out;
3175 
3176 		err = -EINVAL;
3177 		p = p->mnt_parent;
3178 		flags |= MNT_TREE_BENEATH;
3179 	}
3180 
3181 	/*
3182 	 * Don't move a mount tree containing unbindable mounts to a destination
3183 	 * mount which is shared.
3184 	 */
3185 	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
3186 		goto out;
3187 	err = -ELOOP;
3188 	if (!check_for_nsfs_mounts(old))
3189 		goto out;
3190 	for (; mnt_has_parent(p); p = p->mnt_parent)
3191 		if (p == old)
3192 			goto out;
3193 
3194 	err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, flags);
3195 	if (err)
3196 		goto out;
3197 
3198 	/* if the mount is moved, it should no longer be expired
3199 	 * automatically */
3200 	list_del_init(&old->mnt_expire);
3201 	if (attached)
3202 		put_mountpoint(old_mp);
3203 out:
3204 	unlock_mount(mp);
3205 	if (!err) {
3206 		if (attached)
3207 			mntput_no_expire(parent);
3208 		else
3209 			free_mnt_ns(ns);
3210 	}
3211 	return err;
3212 }
3213 
3214 static int do_move_mount_old(struct path *path, const char *old_name)
3215 {
3216 	struct path old_path;
3217 	int err;
3218 
3219 	if (!old_name || !*old_name)
3220 		return -EINVAL;
3221 
3222 	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3223 	if (err)
3224 		return err;
3225 
3226 	err = do_move_mount(&old_path, path, false);
3227 	path_put(&old_path);
3228 	return err;
3229 }
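/*
 * Editorial sketch, not part of the kernel build: the legacy move
 * interface handled above, equivalent to "mount --move /old /new".
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/old/mnt", "/new/mnt", NULL, MS_MOVE, NULL))
		perror("move");
	return 0;
}
#endif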
3230 
3231 /*
3232  * add a mount into a namespace's mount tree
3233  */
3234 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
3235 			const struct path *path, int mnt_flags)
3236 {
3237 	struct mount *parent = real_mount(path->mnt);
3238 
3239 	mnt_flags &= ~MNT_INTERNAL_FLAGS;
3240 
3241 	if (unlikely(!check_mnt(parent))) {
3242 		/* that's acceptable only for automounts done in private ns */
3243 		if (!(mnt_flags & MNT_SHRINKABLE))
3244 			return -EINVAL;
3245 		/* ... and for those we'd better have mountpoint still alive */
3246 		if (!parent->mnt_ns)
3247 			return -EINVAL;
3248 	}
3249 
3250 	/* Refuse the same filesystem on the same mount point */
3251 	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && path_mounted(path))
3252 		return -EBUSY;
3253 
3254 	if (d_is_symlink(newmnt->mnt.mnt_root))
3255 		return -EINVAL;
3256 
3257 	newmnt->mnt.mnt_flags = mnt_flags;
3258 	return graft_tree(newmnt, parent, mp);
3259 }
3260 
3261 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3262 
3263 /*
3264  * Create a new mount using a superblock configuration and request it
3265  * be added to the namespace tree.
3266  */
3267 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
3268 			   unsigned int mnt_flags)
3269 {
3270 	struct vfsmount *mnt;
3271 	struct mountpoint *mp;
3272 	struct super_block *sb = fc->root->d_sb;
3273 	int error;
3274 
3275 	error = security_sb_kern_mount(sb);
3276 	if (!error && mount_too_revealing(sb, &mnt_flags))
3277 		error = -EPERM;
3278 
3279 	if (unlikely(error)) {
3280 		fc_drop_locked(fc);
3281 		return error;
3282 	}
3283 
3284 	up_write(&sb->s_umount);
3285 
3286 	mnt = vfs_create_mount(fc);
3287 	if (IS_ERR(mnt))
3288 		return PTR_ERR(mnt);
3289 
3290 	mnt_warn_timestamp_expiry(mountpoint, mnt);
3291 
3292 	mp = lock_mount(mountpoint);
3293 	if (IS_ERR(mp)) {
3294 		mntput(mnt);
3295 		return PTR_ERR(mp);
3296 	}
3297 	error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3298 	unlock_mount(mp);
3299 	if (error < 0)
3300 		mntput(mnt);
3301 	return error;
3302 }
3303 
3304 /*
3305  * create a new mount for userspace and request it to be added into the
3306  * namespace's tree
3307  */
3308 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
3309 			int mnt_flags, const char *name, void *data)
3310 {
3311 	struct file_system_type *type;
3312 	struct fs_context *fc;
3313 	const char *subtype = NULL;
3314 	int err = 0;
3315 
3316 	if (!fstype)
3317 		return -EINVAL;
3318 
3319 	type = get_fs_type(fstype);
3320 	if (!type)
3321 		return -ENODEV;
3322 
3323 	if (type->fs_flags & FS_HAS_SUBTYPE) {
3324 		subtype = strchr(fstype, '.');
3325 		if (subtype) {
3326 			subtype++;
3327 			if (!*subtype) {
3328 				put_filesystem(type);
3329 				return -EINVAL;
3330 			}
3331 		}
3332 	}
3333 
3334 	fc = fs_context_for_mount(type, sb_flags);
3335 	put_filesystem(type);
3336 	if (IS_ERR(fc))
3337 		return PTR_ERR(fc);
3338 
3339 	/*
3340 	 * Indicate to the filesystem that the mount request is coming
3341 	 * from the legacy mount system call.
3342 	 */
3343 	fc->oldapi = true;
3344 
3345 	if (subtype)
3346 		err = vfs_parse_fs_string(fc, "subtype",
3347 					  subtype, strlen(subtype));
3348 	if (!err && name)
3349 		err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3350 	if (!err)
3351 		err = parse_monolithic_mount_data(fc, data);
3352 	if (!err && !mount_capable(fc))
3353 		err = -EPERM;
3354 	if (!err)
3355 		err = vfs_get_tree(fc);
3356 	if (!err)
3357 		err = do_new_mount_fc(fc, path, mnt_flags);
3358 
3359 	put_fs_context(fc);
3360 	return err;
3361 }
3362 
3363 int finish_automount(struct vfsmount *m, const struct path *path)
3364 {
3365 	struct dentry *dentry = path->dentry;
3366 	struct mountpoint *mp;
3367 	struct mount *mnt;
3368 	int err;
3369 
3370 	if (!m)
3371 		return 0;
3372 	if (IS_ERR(m))
3373 		return PTR_ERR(m);
3374 
3375 	mnt = real_mount(m);
3376 	/* The new mount record should have at least 2 refs to prevent it from
3377 	 * being expired before we get a chance to add it
3378 	 */
3379 	BUG_ON(mnt_get_count(mnt) < 2);
3380 
3381 	if (m->mnt_sb == path->mnt->mnt_sb &&
3382 	    m->mnt_root == dentry) {
3383 		err = -ELOOP;
3384 		goto discard;
3385 	}
3386 
3387 	/*
3388 	 * we don't want to use lock_mount() - in this case finding something
3389 	 * that overmounts our mountpoint means "quietly drop what we've
3390 	 * got", not "try to mount it on top".
3391 	 */
3392 	inode_lock(dentry->d_inode);
3393 	namespace_lock();
3394 	if (unlikely(cant_mount(dentry))) {
3395 		err = -ENOENT;
3396 		goto discard_locked;
3397 	}
3398 	if (path_overmounted(path)) {
3399 		err = 0;
3400 		goto discard_locked;
3401 	}
3402 	mp = get_mountpoint(dentry);
3403 	if (IS_ERR(mp)) {
3404 		err = PTR_ERR(mp);
3405 		goto discard_locked;
3406 	}
3407 
3408 	err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3409 	unlock_mount(mp);
3410 	if (unlikely(err))
3411 		goto discard;
3412 	mntput(m);
3413 	return 0;
3414 
3415 discard_locked:
3416 	namespace_unlock();
3417 	inode_unlock(dentry->d_inode);
3418 discard:
3419 	/* remove m from any expiration list it may be on */
3420 	if (!list_empty(&mnt->mnt_expire)) {
3421 		namespace_lock();
3422 		list_del_init(&mnt->mnt_expire);
3423 		namespace_unlock();
3424 	}
3425 	mntput(m);
3426 	mntput(m);
3427 	return err;
3428 }
3429 
3430 /**
3431  * mnt_set_expiry - Put a mount on an expiration list
3432  * @mnt: The mount to list.
3433  * @expiry_list: The list to add the mount to.
3434  */
3435 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3436 {
3437 	namespace_lock();
3438 
3439 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3440 
3441 	namespace_unlock();
3442 }
3443 EXPORT_SYMBOL(mnt_set_expiry);
3444 
3445 /*
3446  * process a list of expirable mountpoints with the intent of discarding any
3447  * mountpoints that aren't in use and haven't been touched since we last
3448  * came here
3449  */
3450 void mark_mounts_for_expiry(struct list_head *mounts)
3451 {
3452 	struct mount *mnt, *next;
3453 	LIST_HEAD(graveyard);
3454 
3455 	if (list_empty(mounts))
3456 		return;
3457 
3458 	namespace_lock();
3459 	lock_mount_hash();
3460 
3461 	/* extract from the expiration list every vfsmount that matches the
3462 	 * following criteria:
3463 	 * - only referenced by its parent vfsmount
3464 	 * - still marked for expiry (marked on the last call here; marks are
3465 	 *   cleared by mntput())
3466 	 */
3467 	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3468 		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3469 			propagate_mount_busy(mnt, 1))
3470 			continue;
3471 		list_move(&mnt->mnt_expire, &graveyard);
3472 	}
3473 	while (!list_empty(&graveyard)) {
3474 		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3475 		touch_mnt_namespace(mnt->mnt_ns);
3476 		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3477 	}
3478 	unlock_mount_hash();
3479 	namespace_unlock();
3480 }
3481 
3482 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
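/*
 * Editorial sketch, not part of the kernel build: the pattern network
 * filesystems use with this pair of helpers - queue automounted
 * submounts on a private list with mnt_set_expiry(), then reap the idle
 * ones periodically. The list, work item and interval are hypothetical.
 */
#if 0
static LIST_HEAD(example_auto_mounts);

static void example_reaper(struct work_struct *work)
{
	/* First pass marks candidates; a later pass unmounts them. */
	mark_mounts_for_expiry(&example_auto_mounts);
	schedule_delayed_work(to_delayed_work(work), 30 * HZ);
}

static void example_add_automount(struct vfsmount *mnt)
{
	mnt_set_expiry(mnt, &example_auto_mounts);
}
#endif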
3483 
3484 /*
3485  * Ripoff of 'select_parent()'
3486  *
3487  * search the list of submounts for a given mountpoint, and move any
3488  * shrinkable submounts to the 'graveyard' list.
3489  */
3490 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3491 {
3492 	struct mount *this_parent = parent;
3493 	struct list_head *next;
3494 	int found = 0;
3495 
3496 repeat:
3497 	next = this_parent->mnt_mounts.next;
3498 resume:
3499 	while (next != &this_parent->mnt_mounts) {
3500 		struct list_head *tmp = next;
3501 		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3502 
3503 		next = tmp->next;
3504 		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3505 			continue;
3506 		/*
3507 		 * Descend a level if the mnt_mounts list is non-empty.
3508 		 */
3509 		if (!list_empty(&mnt->mnt_mounts)) {
3510 			this_parent = mnt;
3511 			goto repeat;
3512 		}
3513 
3514 		if (!propagate_mount_busy(mnt, 1)) {
3515 			list_move_tail(&mnt->mnt_expire, graveyard);
3516 			found++;
3517 		}
3518 	}
3519 	/*
3520 	 * All done at this level ... ascend and resume the search
3521 	 */
3522 	if (this_parent != parent) {
3523 		next = this_parent->mnt_child.next;
3524 		this_parent = this_parent->mnt_parent;
3525 		goto resume;
3526 	}
3527 	return found;
3528 }
3529 
3530 /*
3531  * process a list of expirable mountpoints with the intent of discarding any
3532  * submounts of a specific parent mountpoint
3533  *
3534  * mount_lock must be held for write
3535  */
3536 static void shrink_submounts(struct mount *mnt)
3537 {
3538 	LIST_HEAD(graveyard);
3539 	struct mount *m;
3540 
3541 	/* extract submounts of 'mountpoint' from the expiration list */
3542 	while (select_submounts(mnt, &graveyard)) {
3543 		while (!list_empty(&graveyard)) {
3544 			m = list_first_entry(&graveyard, struct mount,
3545 						mnt_expire);
3546 			touch_mnt_namespace(m->mnt_ns);
3547 			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3548 		}
3549 	}
3550 }
3551 
3552 static void *copy_mount_options(const void __user * data)
3553 {
3554 	char *copy;
3555 	unsigned left, offset;
3556 
3557 	if (!data)
3558 		return NULL;
3559 
3560 	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3561 	if (!copy)
3562 		return ERR_PTR(-ENOMEM);
3563 
3564 	left = copy_from_user(copy, data, PAGE_SIZE);
3565 
3566 	/*
3567 	 * Not all architectures have an exact copy_from_user(): it may stop
3568 	 * short of the first faulting byte. Resort to copying byte at a time.
3569 	 */
3570 	offset = PAGE_SIZE - left;
3571 	while (left) {
3572 		char c;
3573 		if (get_user(c, (const char __user *)data + offset))
3574 			break;
3575 		copy[offset] = c;
3576 		left--;
3577 		offset++;
3578 	}
3579 
3580 	if (left == PAGE_SIZE) {
3581 		kfree(copy);
3582 		return ERR_PTR(-EFAULT);
3583 	}
3584 
3585 	return copy;
3586 }
3587 
3588 static char *copy_mount_string(const void __user *data)
3589 {
3590 	return data ? strndup_user(data, PATH_MAX) : NULL;
3591 }
3592 
3593 /*
3594  * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3595  * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3596  *
3597  * data is a (void *) that can point to any structure up to
3598  * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3599  * information (or be NULL).
3600  *
3601  * Pre-0.97 versions of mount() didn't have a flags word.
3602  * When the flags word was introduced its top half was required
3603  * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3604  * Therefore, if this magic number is present, it carries no information
3605  * and must be discarded.
3606  */
3607 int path_mount(const char *dev_name, struct path *path,
3608 		const char *type_page, unsigned long flags, void *data_page)
3609 {
3610 	unsigned int mnt_flags = 0, sb_flags;
3611 	int ret;
3612 
3613 	/* Discard magic */
3614 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3615 		flags &= ~MS_MGC_MSK;
3616 
3617 	/* Basic sanity checks */
3618 	if (data_page)
3619 		((char *)data_page)[PAGE_SIZE - 1] = 0;
3620 
3621 	if (flags & MS_NOUSER)
3622 		return -EINVAL;
3623 
3624 	ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3625 	if (ret)
3626 		return ret;
3627 	if (!may_mount())
3628 		return -EPERM;
3629 	if (flags & SB_MANDLOCK)
3630 		warn_mandlock();
3631 
3632 	/* Default to relatime unless overridden */
3633 	if (!(flags & MS_NOATIME))
3634 		mnt_flags |= MNT_RELATIME;
3635 
3636 	/* Separate the per-mountpoint flags */
3637 	if (flags & MS_NOSUID)
3638 		mnt_flags |= MNT_NOSUID;
3639 	if (flags & MS_NODEV)
3640 		mnt_flags |= MNT_NODEV;
3641 	if (flags & MS_NOEXEC)
3642 		mnt_flags |= MNT_NOEXEC;
3643 	if (flags & MS_NOATIME)
3644 		mnt_flags |= MNT_NOATIME;
3645 	if (flags & MS_NODIRATIME)
3646 		mnt_flags |= MNT_NODIRATIME;
3647 	if (flags & MS_STRICTATIME)
3648 		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3649 	if (flags & MS_RDONLY)
3650 		mnt_flags |= MNT_READONLY;
3651 	if (flags & MS_NOSYMFOLLOW)
3652 		mnt_flags |= MNT_NOSYMFOLLOW;
3653 
3654 	/* The default atime for remount is preservation */
3655 	if ((flags & MS_REMOUNT) &&
3656 	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3657 		       MS_STRICTATIME)) == 0)) {
3658 		mnt_flags &= ~MNT_ATIME_MASK;
3659 		mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3660 	}
3661 
3662 	sb_flags = flags & (SB_RDONLY |
3663 			    SB_SYNCHRONOUS |
3664 			    SB_MANDLOCK |
3665 			    SB_DIRSYNC |
3666 			    SB_SILENT |
3667 			    SB_POSIXACL |
3668 			    SB_LAZYTIME |
3669 			    SB_I_VERSION);
3670 
3671 	if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3672 		return do_reconfigure_mnt(path, mnt_flags);
3673 	if (flags & MS_REMOUNT)
3674 		return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3675 	if (flags & MS_BIND)
3676 		return do_loopback(path, dev_name, flags & MS_REC);
3677 	if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3678 		return do_change_type(path, flags);
3679 	if (flags & MS_MOVE)
3680 		return do_move_mount_old(path, dev_name);
3681 
3682 	return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3683 			    data_page);
3684 }
3685 
3686 long do_mount(const char *dev_name, const char __user *dir_name,
3687 		const char *type_page, unsigned long flags, void *data_page)
3688 {
3689 	struct path path;
3690 	int ret;
3691 
3692 	ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3693 	if (ret)
3694 		return ret;
3695 	ret = path_mount(dev_name, &path, type_page, flags, data_page);
3696 	path_put(&path);
3697 	return ret;
3698 }
3699 
3700 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3701 {
3702 	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3703 }
3704 
3705 static void dec_mnt_namespaces(struct ucounts *ucounts)
3706 {
3707 	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3708 }
3709 
3710 static void free_mnt_ns(struct mnt_namespace *ns)
3711 {
3712 	if (!is_anon_ns(ns))
3713 		ns_free_inum(&ns->ns);
3714 	dec_mnt_namespaces(ns->ucounts);
3715 	put_user_ns(ns->user_ns);
3716 	kfree(ns);
3717 }
3718 
3719 /*
3720  * Assign a sequence number so we can detect when we attempt to bind
3721  * mount a reference to an older mount namespace into the current
3722  * mount namespace, preventing reference counting loops.  A 64bit
3723  * counter incremented even at 10GHz would take roughly 58 years to wrap,
3724  * and real allocation rates are far lower, so we can ignore the possibility.
3725  */
3726 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3727 
3728 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3729 {
3730 	struct mnt_namespace *new_ns;
3731 	struct ucounts *ucounts;
3732 	int ret;
3733 
3734 	ucounts = inc_mnt_namespaces(user_ns);
3735 	if (!ucounts)
3736 		return ERR_PTR(-ENOSPC);
3737 
3738 	new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3739 	if (!new_ns) {
3740 		dec_mnt_namespaces(ucounts);
3741 		return ERR_PTR(-ENOMEM);
3742 	}
3743 	if (!anon) {
3744 		ret = ns_alloc_inum(&new_ns->ns);
3745 		if (ret) {
3746 			kfree(new_ns);
3747 			dec_mnt_namespaces(ucounts);
3748 			return ERR_PTR(ret);
3749 		}
3750 	}
3751 	new_ns->ns.ops = &mntns_operations;
3752 	if (!anon)
3753 		new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3754 	refcount_set(&new_ns->ns.count, 1);
3755 	INIT_LIST_HEAD(&new_ns->list);
3756 	init_waitqueue_head(&new_ns->poll);
3757 	spin_lock_init(&new_ns->ns_lock);
3758 	new_ns->user_ns = get_user_ns(user_ns);
3759 	new_ns->ucounts = ucounts;
3760 	return new_ns;
3761 }
3762 
3763 __latent_entropy
3764 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3765 		struct user_namespace *user_ns, struct fs_struct *new_fs)
3766 {
3767 	struct mnt_namespace *new_ns;
3768 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3769 	struct mount *p, *q;
3770 	struct mount *old;
3771 	struct mount *new;
3772 	int copy_flags;
3773 
3774 	BUG_ON(!ns);
3775 
3776 	if (likely(!(flags & CLONE_NEWNS))) {
3777 		get_mnt_ns(ns);
3778 		return ns;
3779 	}
3780 
3781 	old = ns->root;
3782 
3783 	new_ns = alloc_mnt_ns(user_ns, false);
3784 	if (IS_ERR(new_ns))
3785 		return new_ns;
3786 
3787 	namespace_lock();
3788 	/* First pass: copy the tree topology */
3789 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3790 	if (user_ns != ns->user_ns)
3791 		copy_flags |= CL_SHARED_TO_SLAVE;
3792 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3793 	if (IS_ERR(new)) {
3794 		namespace_unlock();
3795 		free_mnt_ns(new_ns);
3796 		return ERR_CAST(new);
3797 	}
3798 	if (user_ns != ns->user_ns) {
3799 		lock_mount_hash();
3800 		lock_mnt_tree(new);
3801 		unlock_mount_hash();
3802 	}
3803 	new_ns->root = new;
3804 	list_add_tail(&new_ns->list, &new->mnt_list);
3805 
3806 	/*
3807 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3808 	 * as belonging to new namespace.  We have already acquired a private
3809 	 * fs_struct, so tsk->fs->lock is not needed.
3810 	 */
3811 	p = old;
3812 	q = new;
3813 	while (p) {
3814 		q->mnt_ns = new_ns;
3815 		new_ns->mounts++;
3816 		if (new_fs) {
3817 			if (&p->mnt == new_fs->root.mnt) {
3818 				new_fs->root.mnt = mntget(&q->mnt);
3819 				rootmnt = &p->mnt;
3820 			}
3821 			if (&p->mnt == new_fs->pwd.mnt) {
3822 				new_fs->pwd.mnt = mntget(&q->mnt);
3823 				pwdmnt = &p->mnt;
3824 			}
3825 		}
3826 		p = next_mnt(p, old);
3827 		q = next_mnt(q, new);
3828 		if (!q)
3829 			break;
3830 		// an mntns binding we'd skipped?
3831 		while (p->mnt.mnt_root != q->mnt.mnt_root)
3832 			p = next_mnt(skip_mnt_tree(p), old);
3833 	}
3834 	namespace_unlock();
3835 
3836 	if (rootmnt)
3837 		mntput(rootmnt);
3838 	if (pwdmnt)
3839 		mntput(pwdmnt);
3840 
3841 	return new_ns;
3842 }
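
/*
 * Illustrative userspace sketch (not part of this file) of the path that
 * reaches copy_mnt_ns(): unshare(CLONE_NEWNS) gives the caller a private
 * copy of its mount tree, and the follow-up mount() call turns off shared
 * propagation so later changes stay private.  CAP_SYS_ADMIN in the owning
 * user namespace is assumed.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWNS) < 0) {		// -> copy_mnt_ns()
 *			perror("unshare");
 *			return 1;
 *		}
 *		// Stop mount events from propagating back to the old ns.
 *		if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) < 0) {
 *			perror("mount");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */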
3843 
3844 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3845 {
3846 	struct mount *mnt = real_mount(m);
3847 	struct mnt_namespace *ns;
3848 	struct super_block *s;
3849 	struct path path;
3850 	int err;
3851 
3852 	ns = alloc_mnt_ns(&init_user_ns, true);
3853 	if (IS_ERR(ns)) {
3854 		mntput(m);
3855 		return ERR_CAST(ns);
3856 	}
3857 	mnt->mnt_ns = ns;
3858 	ns->root = mnt;
3859 	ns->mounts++;
3860 	list_add(&mnt->mnt_list, &ns->list);
3861 
3862 	err = vfs_path_lookup(m->mnt_root, m,
3863 			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3864 
3865 	put_mnt_ns(ns);
3866 
3867 	if (err)
3868 		return ERR_PTR(err);
3869 
3870 	/* trade a vfsmount reference for active sb one */
3871 	s = path.mnt->mnt_sb;
3872 	atomic_inc(&s->s_active);
3873 	mntput(path.mnt);
3874 	/* lock the sucker */
3875 	down_write(&s->s_umount);
3876 	/* ... and return the root of (sub)tree on it */
3877 	return path.dentry;
3878 }
3879 EXPORT_SYMBOL(mount_subtree);
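
/*
 * Hedged sketch of in-kernel mount_subtree() usage; the filesystem type,
 * mount name and subpath below are hypothetical.  As the function above
 * shows, it consumes the vfsmount reference and returns the dentry of the
 * given subpath with an active superblock reference and s_umount held.
 *
 *	struct vfsmount *inner;
 *
 *	inner = vfs_kern_mount(&example_fs_type, 0, "example", NULL);
 *	if (IS_ERR(inner))
 *		return ERR_CAST(inner);
 *	return mount_subtree(inner, "/sub/dir");
 */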
3880 
3881 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3882 		char __user *, type, unsigned long, flags, void __user *, data)
3883 {
3884 	int ret;
3885 	char *kernel_type;
3886 	char *kernel_dev;
3887 	void *options;
3888 
3889 	kernel_type = copy_mount_string(type);
3890 	ret = PTR_ERR(kernel_type);
3891 	if (IS_ERR(kernel_type))
3892 		goto out_type;
3893 
3894 	kernel_dev = copy_mount_string(dev_name);
3895 	ret = PTR_ERR(kernel_dev);
3896 	if (IS_ERR(kernel_dev))
3897 		goto out_dev;
3898 
3899 	options = copy_mount_options(data);
3900 	ret = PTR_ERR(options);
3901 	if (IS_ERR(options))
3902 		goto out_data;
3903 
3904 	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3905 
3906 	kfree(options);
3907 out_data:
3908 	kfree(kernel_dev);
3909 out_dev:
3910 	kfree(kernel_type);
3911 out_type:
3912 	return ret;
3913 }
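
/*
 * Illustrative userspace counterpart (not part of this file): a plain
 * mount(2) call that enters the syscall above and, carrying none of the
 * special flags, ends up in do_new_mount().  "/mnt" is assumed to exist.
 *
 *	#include <sys/mount.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (mount("tmpfs", "/mnt", "tmpfs",
 *			  MS_NOSUID | MS_NODEV, "size=16m") < 0) {
 *			perror("mount");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */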
3914 
3915 #define FSMOUNT_VALID_FLAGS                                                    \
3916 	(MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV |            \
3917 	 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME |       \
3918 	 MOUNT_ATTR_NOSYMFOLLOW)
3919 
3920 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
3921 
3922 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
3923 	(MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
3924 
3925 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
3926 {
3927 	unsigned int mnt_flags = 0;
3928 
3929 	if (attr_flags & MOUNT_ATTR_RDONLY)
3930 		mnt_flags |= MNT_READONLY;
3931 	if (attr_flags & MOUNT_ATTR_NOSUID)
3932 		mnt_flags |= MNT_NOSUID;
3933 	if (attr_flags & MOUNT_ATTR_NODEV)
3934 		mnt_flags |= MNT_NODEV;
3935 	if (attr_flags & MOUNT_ATTR_NOEXEC)
3936 		mnt_flags |= MNT_NOEXEC;
3937 	if (attr_flags & MOUNT_ATTR_NODIRATIME)
3938 		mnt_flags |= MNT_NODIRATIME;
3939 	if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
3940 		mnt_flags |= MNT_NOSYMFOLLOW;
3941 
3942 	return mnt_flags;
3943 }
3944 
3945 /*
3946  * Create a kernel mount representation for a new, prepared superblock
3947  * (specified by fs_fd) and attach it to an open_tree()-like file descriptor.
3948  */
3949 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3950 		unsigned int, attr_flags)
3951 {
3952 	struct mnt_namespace *ns;
3953 	struct fs_context *fc;
3954 	struct file *file;
3955 	struct path newmount;
3956 	struct mount *mnt;
3957 	struct fd f;
3958 	unsigned int mnt_flags = 0;
3959 	long ret;
3960 
3961 	if (!may_mount())
3962 		return -EPERM;
3963 
3964 	if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3965 		return -EINVAL;
3966 
3967 	if (attr_flags & ~FSMOUNT_VALID_FLAGS)
3968 		return -EINVAL;
3969 
3970 	mnt_flags = attr_flags_to_mnt_flags(attr_flags);
3971 
3972 	switch (attr_flags & MOUNT_ATTR__ATIME) {
3973 	case MOUNT_ATTR_STRICTATIME:
3974 		break;
3975 	case MOUNT_ATTR_NOATIME:
3976 		mnt_flags |= MNT_NOATIME;
3977 		break;
3978 	case MOUNT_ATTR_RELATIME:
3979 		mnt_flags |= MNT_RELATIME;
3980 		break;
3981 	default:
3982 		return -EINVAL;
3983 	}
3984 
3985 	f = fdget(fs_fd);
3986 	if (!f.file)
3987 		return -EBADF;
3988 
3989 	ret = -EINVAL;
3990 	if (f.file->f_op != &fscontext_fops)
3991 		goto err_fsfd;
3992 
3993 	fc = f.file->private_data;
3994 
3995 	ret = mutex_lock_interruptible(&fc->uapi_mutex);
3996 	if (ret < 0)
3997 		goto err_fsfd;
3998 
3999 	/* There must be a valid superblock or we can't mount it */
4000 	ret = -EINVAL;
4001 	if (!fc->root)
4002 		goto err_unlock;
4003 
4004 	ret = -EPERM;
4005 	if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
4006 		pr_warn("VFS: Mount too revealing\n");
4007 		goto err_unlock;
4008 	}
4009 
4010 	ret = -EBUSY;
4011 	if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
4012 		goto err_unlock;
4013 
4014 	if (fc->sb_flags & SB_MANDLOCK)
4015 		warn_mandlock();
4016 
4017 	newmount.mnt = vfs_create_mount(fc);
4018 	if (IS_ERR(newmount.mnt)) {
4019 		ret = PTR_ERR(newmount.mnt);
4020 		goto err_unlock;
4021 	}
4022 	newmount.dentry = dget(fc->root);
4023 	newmount.mnt->mnt_flags = mnt_flags;
4024 
4025 	/* We've done the mount bit - now move the file context into more or
4026 	 * less the same state as if we'd done an fspick().  We don't want to
4027 	 * do any memory allocation or anything like that at this point as we
4028 	 * don't want to have to handle any errors incurred.
4029 	 */
4030 	vfs_clean_context(fc);
4031 
4032 	ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
4033 	if (IS_ERR(ns)) {
4034 		ret = PTR_ERR(ns);
4035 		goto err_path;
4036 	}
4037 	mnt = real_mount(newmount.mnt);
4038 	mnt->mnt_ns = ns;
4039 	ns->root = mnt;
4040 	ns->mounts = 1;
4041 	list_add(&mnt->mnt_list, &ns->list);
4042 	mntget(newmount.mnt);
4043 
4044 	/* Attach to an apparent O_PATH fd with a note that we need to unmount
4045 	 * it, not just simply put it.
4046 	 */
4047 	file = dentry_open(&newmount, O_PATH, fc->cred);
4048 	if (IS_ERR(file)) {
4049 		dissolve_on_fput(newmount.mnt);
4050 		ret = PTR_ERR(file);
4051 		goto err_path;
4052 	}
4053 	file->f_mode |= FMODE_NEED_UNMOUNT;
4054 
4055 	ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
4056 	if (ret >= 0)
4057 		fd_install(ret, file);
4058 	else
4059 		fput(file);
4060 
4061 err_path:
4062 	path_put(&newmount);
4063 err_unlock:
4064 	mutex_unlock(&fc->uapi_mutex);
4065 err_fsfd:
4066 	fdput(f);
4067 	return ret;
4068 }
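
/*
 * Illustrative userspace sketch of the new mount API sequence that drives
 * fsmount(): fsopen() creates the fs_context, fsconfig() parameterizes it
 * and creates the superblock, fsmount() wraps it in a detached mount, and
 * move_mount() attaches the result.  Raw syscall(2) is used in case libc
 * lacks wrappers; "/mnt" is a placeholder and errors are only spot-checked.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int new_api_tmpfs_mount(void)
 *	{
 *		int fsfd, mntfd;
 *
 *		fsfd = syscall(SYS_fsopen, "tmpfs", FSOPEN_CLOEXEC);
 *		if (fsfd < 0)
 *			return -1;
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *		mntfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC,
 *				MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV);
 *		if (mntfd < 0)
 *			return -1;
 *		return syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
 *			       MOVE_MOUNT_F_EMPTY_PATH);
 *	}
 */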
4069 
4070 /*
4071  * Move a mount from one place to another.  In combination with
4072  * fsopen()/fsmount() this is used to install a new mount and in combination
4073  * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
4074  * a mount subtree.
4075  *
4076  * Note the flags value is a combination of MOVE_MOUNT_* flags.
4077  */
4078 SYSCALL_DEFINE5(move_mount,
4079 		int, from_dfd, const char __user *, from_pathname,
4080 		int, to_dfd, const char __user *, to_pathname,
4081 		unsigned int, flags)
4082 {
4083 	struct path from_path, to_path;
4084 	unsigned int lflags;
4085 	int ret = 0;
4086 
4087 	if (!may_mount())
4088 		return -EPERM;
4089 
4090 	if (flags & ~MOVE_MOUNT__MASK)
4091 		return -EINVAL;
4092 
4093 	if ((flags & (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP)) ==
4094 	    (MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
4095 		return -EINVAL;
4096 
4097 	/* If someone gives a pathname, they aren't permitted to move
4098 	 * from an fd that requires unmount as we can't get at the flag
4099 	 * to clear it afterwards.
4100 	 */
4101 	lflags = 0;
4102 	if (flags & MOVE_MOUNT_F_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
4103 	if (flags & MOVE_MOUNT_F_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
4104 	if (flags & MOVE_MOUNT_F_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;
4105 
4106 	ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
4107 	if (ret < 0)
4108 		return ret;
4109 
4110 	lflags = 0;
4111 	if (flags & MOVE_MOUNT_T_SYMLINKS)	lflags |= LOOKUP_FOLLOW;
4112 	if (flags & MOVE_MOUNT_T_AUTOMOUNTS)	lflags |= LOOKUP_AUTOMOUNT;
4113 	if (flags & MOVE_MOUNT_T_EMPTY_PATH)	lflags |= LOOKUP_EMPTY;
4114 
4115 	ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
4116 	if (ret < 0)
4117 		goto out_from;
4118 
4119 	ret = security_move_mount(&from_path, &to_path);
4120 	if (ret < 0)
4121 		goto out_to;
4122 
4123 	if (flags & MOVE_MOUNT_SET_GROUP)
4124 		ret = do_set_group(&from_path, &to_path);
4125 	else
4126 		ret = do_move_mount(&from_path, &to_path,
4127 				    (flags & MOVE_MOUNT_BENEATH));
4128 
4129 out_to:
4130 	path_put(&to_path);
4131 out_from:
4132 	path_put(&from_path);
4133 	return ret;
4134 }
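
/*
 * Illustrative sketch of the open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) +
 * move_mount() pairing described above: clone a whole mount subtree and
 * attach the copy elsewhere.  Paths are placeholders; AT_RECURSIVE comes
 * from <linux/fcntl.h>, hence the fallback define for older libc headers.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	#ifndef AT_RECURSIVE
 *	#define AT_RECURSIVE 0x8000
 *	#endif
 *
 *	int clone_subtree(const char *src, const char *dst)
 *	{
 *		int fd;
 *
 *		fd = syscall(SYS_open_tree, AT_FDCWD, src,
 *			     OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 *		if (fd < 0)
 *			return -1;
 *		// The detached copy hangs off fd; ""/MOVE_MOUNT_F_EMPTY_PATH
 *		// means "the mount the fd itself refers to".
 *		return syscall(SYS_move_mount, fd, "", AT_FDCWD, dst,
 *			       MOVE_MOUNT_F_EMPTY_PATH);
 *	}
 */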
4135 
4136 /*
4137  * Return true if path is reachable from root
4138  *
4139  * namespace_sem or mount_lock is held
4140  */
4141 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
4142 			 const struct path *root)
4143 {
4144 	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
4145 		dentry = mnt->mnt_mountpoint;
4146 		mnt = mnt->mnt_parent;
4147 	}
4148 	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
4149 }
4150 
4151 bool path_is_under(const struct path *path1, const struct path *path2)
4152 {
4153 	bool res;
4154 	read_seqlock_excl(&mount_lock);
4155 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
4156 	read_sequnlock_excl(&mount_lock);
4157 	return res;
4158 }
4159 EXPORT_SYMBOL(path_is_under);
4160 
4161 /*
4162  * pivot_root Semantics:
4163  * Moves the root file system of the current process to the directory put_old,
4164  * makes new_root the new root file system of the current process, and sets
4165  * root/cwd of all processes which had them on the current root to new_root.
4166  *
4167  * Restrictions:
4168  * The new_root and put_old must be directories, and must not be on the
4169  * same file system as the current process root. The put_old must be
4170  * underneath new_root, i.e. adding a non-zero number of /.. to the string
4171  * pointed to by put_old must yield the same directory as new_root. No other
4172  * file system may be mounted on put_old. After all, new_root is a mountpoint.
4173  *
4174  * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
4175  * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
4176  * in this situation.
4177  *
4178  * Notes:
4179  *  - we don't move root/cwd if they are not at the root (reason: if something
4180  *    cared enough to change them, it's probably wrong to force them elsewhere)
4181  *  - it's okay to pick a root that isn't the root of a file system, e.g.
4182  *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
4183  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
4184  *    first.
4185  */
4186 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
4187 		const char __user *, put_old)
4188 {
4189 	struct path new, old, root;
4190 	struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
4191 	struct mountpoint *old_mp, *root_mp;
4192 	int error;
4193 
4194 	if (!may_mount())
4195 		return -EPERM;
4196 
4197 	error = user_path_at(AT_FDCWD, new_root,
4198 			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
4199 	if (error)
4200 		goto out0;
4201 
4202 	error = user_path_at(AT_FDCWD, put_old,
4203 			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
4204 	if (error)
4205 		goto out1;
4206 
4207 	error = security_sb_pivotroot(&old, &new);
4208 	if (error)
4209 		goto out2;
4210 
4211 	get_fs_root(current->fs, &root);
4212 	old_mp = lock_mount(&old);
4213 	error = PTR_ERR(old_mp);
4214 	if (IS_ERR(old_mp))
4215 		goto out3;
4216 
4217 	error = -EINVAL;
4218 	new_mnt = real_mount(new.mnt);
4219 	root_mnt = real_mount(root.mnt);
4220 	old_mnt = real_mount(old.mnt);
4221 	ex_parent = new_mnt->mnt_parent;
4222 	root_parent = root_mnt->mnt_parent;
4223 	if (IS_MNT_SHARED(old_mnt) ||
4224 		IS_MNT_SHARED(ex_parent) ||
4225 		IS_MNT_SHARED(root_parent))
4226 		goto out4;
4227 	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4228 		goto out4;
4229 	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4230 		goto out4;
4231 	error = -ENOENT;
4232 	if (d_unlinked(new.dentry))
4233 		goto out4;
4234 	error = -EBUSY;
4235 	if (new_mnt == root_mnt || old_mnt == root_mnt)
4236 		goto out4; /* loop, on the same file system  */
4237 	error = -EINVAL;
4238 	if (!path_mounted(&root))
4239 		goto out4; /* not a mountpoint */
4240 	if (!mnt_has_parent(root_mnt))
4241 		goto out4; /* not attached */
4242 	if (!path_mounted(&new))
4243 		goto out4; /* not a mountpoint */
4244 	if (!mnt_has_parent(new_mnt))
4245 		goto out4; /* not attached */
4246 	/* make sure we can reach put_old from new_root */
4247 	if (!is_path_reachable(old_mnt, old.dentry, &new))
4248 		goto out4;
4249 	/* make certain new is below the root */
4250 	if (!is_path_reachable(new_mnt, new.dentry, &root))
4251 		goto out4;
4252 	lock_mount_hash();
4253 	umount_mnt(new_mnt);
4254 	root_mp = unhash_mnt(root_mnt);  /* we'll need its mountpoint */
4255 	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4256 		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4257 		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4258 	}
4259 	/* mount old root on put_old */
4260 	attach_mnt(root_mnt, old_mnt, old_mp, false);
4261 	/* mount new_root on / */
4262 	attach_mnt(new_mnt, root_parent, root_mp, false);
4263 	mnt_add_count(root_parent, -1);
4264 	touch_mnt_namespace(current->nsproxy->mnt_ns);
4265 	/* A moved mount should not expire automatically */
4266 	list_del_init(&new_mnt->mnt_expire);
4267 	put_mountpoint(root_mp);
4268 	unlock_mount_hash();
4269 	chroot_fs_refs(&root, &new);
4270 	error = 0;
4271 out4:
4272 	unlock_mount(old_mp);
4273 	if (!error)
4274 		mntput_no_expire(ex_parent);
4275 out3:
4276 	path_put(&root);
4277 out2:
4278 	path_put(&old);
4279 out1:
4280 	path_put(&new);
4281 out0:
4282 	return error;
4283 }
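
/*
 * Illustrative userspace sketch of the container-style pivot_root dance.
 * new_root is bind-mounted onto itself first so it is a mount point, and
 * pivoting into "." stacks the old root on top of the new one, so it can
 * be detached without a scratch put_old directory.  The path argument is
 * a placeholder; CAP_SYS_ADMIN and a private mount namespace are assumed.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <sys/mount.h>
 *	#include <unistd.h>
 *
 *	int enter_new_root(const char *new_root)
 *	{
 *		if (mount(new_root, new_root, NULL, MS_BIND | MS_REC, NULL) < 0)
 *			return -1;
 *		if (chdir(new_root) < 0)
 *			return -1;
 *		if (syscall(SYS_pivot_root, ".", ".") < 0)
 *			return -1;
 *		// The old root now sits stacked on "."; detach it lazily.
 *		if (umount2(".", MNT_DETACH) < 0)
 *			return -1;
 *		return chdir("/");
 *	}
 */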
4284 
4285 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4286 {
4287 	unsigned int flags = mnt->mnt.mnt_flags;
4288 
4289 	/*  flags to clear */
4290 	flags &= ~kattr->attr_clr;
4291 	/* flags to raise */
4292 	flags |= kattr->attr_set;
4293 
4294 	return flags;
4295 }
4296 
4297 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4298 {
4299 	struct vfsmount *m = &mnt->mnt;
4300 	struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4301 
4302 	if (!kattr->mnt_idmap)
4303 		return 0;
4304 
4305 	/*
4306 	 * Creating an idmapped mount with the filesystem-wide idmapping
4307 	 * doesn't make sense, so block that. We don't allow mushy semantics.
4308 	 */
4309 	if (!check_fsmapping(kattr->mnt_idmap, m->mnt_sb))
4310 		return -EINVAL;
4311 
4312 	/*
4313 	 * Once a mount has been idmapped we don't allow it to change its
4314 	 * mapping. It makes things simpler and callers can just create
4315 	 * another bind-mount they can idmap if they want to.
4316 	 */
4317 	if (is_idmapped_mnt(m))
4318 		return -EPERM;
4319 
4320 	/* The underlying filesystem doesn't support idmapped mounts yet. */
4321 	if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4322 		return -EINVAL;
4323 
4324 	/* We're not controlling the superblock. */
4325 	if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4326 		return -EPERM;
4327 
4328 	/* Mount has already been visible in the filesystem hierarchy. */
4329 	if (!is_anon_ns(mnt->mnt_ns))
4330 		return -EINVAL;
4331 
4332 	return 0;
4333 }
4334 
4335 /**
4336  * mnt_allow_writers() - check whether the attribute change allows writers
4337  * @kattr: the new mount attributes
4338  * @mnt: the mount to which @kattr will be applied
4339  *
4340  * Check whether the new mount attributes in @kattr allow concurrent writers.
4341  *
4342  * Return: true if concurrent writers are allowed, false if they must be held off
4343  */
4344 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4345 				     const struct mount *mnt)
4346 {
4347 	return (!(kattr->attr_set & MNT_READONLY) ||
4348 		(mnt->mnt.mnt_flags & MNT_READONLY)) &&
4349 	       !kattr->mnt_idmap;
4350 }
4351 
4352 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4353 {
4354 	struct mount *m;
4355 	int err;
4356 
4357 	for (m = mnt; m; m = next_mnt(m, mnt)) {
4358 		if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4359 			err = -EPERM;
4360 			break;
4361 		}
4362 
4363 		err = can_idmap_mount(kattr, m);
4364 		if (err)
4365 			break;
4366 
4367 		if (!mnt_allow_writers(kattr, m)) {
4368 			err = mnt_hold_writers(m);
4369 			if (err)
4370 				break;
4371 		}
4372 
4373 		if (!kattr->recurse)
4374 			return 0;
4375 	}
4376 
4377 	if (err) {
4378 		struct mount *p;
4379 
4380 		/*
4381 		 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4382 		 * be set on the affected mounts. The loop unsets MNT_WRITE_HOLD for
4383 		 * all of them and needs to take care to include the first mount.
4384 		 */
4385 		for (p = mnt; p; p = next_mnt(p, mnt)) {
4386 			/* If we had to hold writers unblock them. */
4387 			if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4388 				mnt_unhold_writers(p);
4389 
4390 			/*
4391 			 * We're done once the first mount we changed got
4392 			 * MNT_WRITE_HOLD unset.
4393 			 */
4394 			if (p == m)
4395 				break;
4396 		}
4397 	}
4398 	return err;
4399 }
4400 
4401 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4402 {
4403 	if (!kattr->mnt_idmap)
4404 		return;
4405 
4406 	/*
4407 	 * Pairs with smp_load_acquire() in mnt_idmap().
4408 	 *
4409 	 * Since we only allow a mount to change the idmapping once and
4410 	 * verified this in can_idmap_mount() we know that the mount has
4411 	 * @nop_mnt_idmap attached to it. So there's no need to drop any
4412 	 * references.
4413 	 */
4414 	smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4415 }
4416 
4417 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4418 {
4419 	struct mount *m;
4420 
4421 	for (m = mnt; m; m = next_mnt(m, mnt)) {
4422 		unsigned int flags;
4423 
4424 		do_idmap_mount(kattr, m);
4425 		flags = recalc_flags(kattr, m);
4426 		WRITE_ONCE(m->mnt.mnt_flags, flags);
4427 
4428 		/* If we had to hold writers unblock them. */
4429 		if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4430 			mnt_unhold_writers(m);
4431 
4432 		if (kattr->propagation)
4433 			change_mnt_propagation(m, kattr->propagation);
4434 		if (!kattr->recurse)
4435 			break;
4436 	}
4437 	touch_mnt_namespace(mnt->mnt_ns);
4438 }
4439 
4440 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4441 {
4442 	struct mount *mnt = real_mount(path->mnt);
4443 	int err = 0;
4444 
4445 	if (!path_mounted(path))
4446 		return -EINVAL;
4447 
4448 	if (kattr->mnt_userns) {
4449 		struct mnt_idmap *mnt_idmap;
4450 
4451 		mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4452 		if (IS_ERR(mnt_idmap))
4453 			return PTR_ERR(mnt_idmap);
4454 		kattr->mnt_idmap = mnt_idmap;
4455 	}
4456 
4457 	if (kattr->propagation) {
4458 		/*
4459 		 * Only take namespace_lock() if we're actually changing
4460 		 * propagation.
4461 		 */
4462 		namespace_lock();
4463 		if (kattr->propagation == MS_SHARED) {
4464 			err = invent_group_ids(mnt, kattr->recurse);
4465 			if (err) {
4466 				namespace_unlock();
4467 				return err;
4468 			}
4469 		}
4470 	}
4471 
4472 	err = -EINVAL;
4473 	lock_mount_hash();
4474 
4475 	/* Ensure that this isn't anything purely vfs internal. */
4476 	if (!is_mounted(&mnt->mnt))
4477 		goto out;
4478 
4479 	/*
4480 	 * If this is an attached mount make sure it's located in the caller's
4481 	 * mount namespace. If it's not, don't let the caller interact with it.
4482 	 *
4483 	 * If this mount doesn't have a parent it's most often simply a
4484 	 * detached mount with an anonymous mount namespace. IOW, something
4485 	 * that's simply not attached yet. But there are apparently also users
4486 	 * that do change mount properties on the rootfs itself. That obviously
4487 	 * neither has a parent nor is it a detached mount so we cannot
4488 	 * unconditionally check for detached mounts.
4489 	 */
4490 	if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
4491 		goto out;
4492 
4493 	/*
4494 	 * First, we get the mount tree in a shape where we can change mount
4495 	 * properties without failure. If we succeed, we commit all changes;
4496 	 * if we fail, we clean up.
4497 	 */
4498 	err = mount_setattr_prepare(kattr, mnt);
4499 	if (!err)
4500 		mount_setattr_commit(kattr, mnt);
4501 
4502 out:
4503 	unlock_mount_hash();
4504 
4505 	if (kattr->propagation) {
4506 		if (err)
4507 			cleanup_group_ids(mnt, NULL);
4508 		namespace_unlock();
4509 	}
4510 
4511 	return err;
4512 }
4513 
4514 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4515 				struct mount_kattr *kattr, unsigned int flags)
4516 {
4517 	int err = 0;
4518 	struct ns_common *ns;
4519 	struct user_namespace *mnt_userns;
4520 	struct fd f;
4521 
4522 	if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4523 		return 0;
4524 
4525 	/*
4526 	 * We currently do not support clearing an idmapped mount. If this
4527 	 * ever becomes a use-case we can revisit it, but for now keep it
4528 	 * simple and disallow it.
4529 	 */
4530 	if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4531 		return -EINVAL;
4532 
4533 	if (attr->userns_fd > INT_MAX)
4534 		return -EINVAL;
4535 
4536 	f = fdget(attr->userns_fd);
4537 	if (!f.file)
4538 		return -EBADF;
4539 
4540 	if (!proc_ns_file(f.file)) {
4541 		err = -EINVAL;
4542 		goto out_fput;
4543 	}
4544 
4545 	ns = get_proc_ns(file_inode(f.file));
4546 	if (ns->ops->type != CLONE_NEWUSER) {
4547 		err = -EINVAL;
4548 		goto out_fput;
4549 	}
4550 
4551 	/*
4552 	 * The initial idmapping cannot be used to create an idmapped
4553 	 * mount. We use the initial idmapping as an indicator of a mount
4554 	 * that is not idmapped. It can simply be passed into helpers that
4555 	 * are aware of idmapped mounts as a convenient shortcut. A user
4556 	 * can just create a dedicated identity mapping to achieve the same
4557 	 * result.
4558 	 */
4559 	mnt_userns = container_of(ns, struct user_namespace, ns);
4560 	if (mnt_userns == &init_user_ns) {
4561 		err = -EPERM;
4562 		goto out_fput;
4563 	}
4564 
4565 	/* We're not controlling the target namespace. */
4566 	if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4567 		err = -EPERM;
4568 		goto out_fput;
4569 	}
4570 
4571 	kattr->mnt_userns = get_user_ns(mnt_userns);
4572 
4573 out_fput:
4574 	fdput(f);
4575 	return err;
4576 }
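
/*
 * Illustrative userspace sketch of creating an idmapped mount, matching
 * the checks above: the mount is detached via open_tree(OPEN_TREE_CLONE),
 * the user namespace fd must not refer to init_user_ns, and CAP_SYS_ADMIN
 * in that namespace is required.  The userns path, source and destination
 * arguments are placeholders.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int idmap_bind_mount(const char *src, const char *dst,
 *			     const char *userns_path)
 *	{
 *		struct mount_attr attr = { .attr_set = MOUNT_ATTR_IDMAP };
 *		int mntfd, ufd;
 *
 *		mntfd = syscall(SYS_open_tree, AT_FDCWD, src,
 *				OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *		if (mntfd < 0)
 *			return -1;
 *		ufd = open(userns_path, O_RDONLY | O_CLOEXEC);
 *		if (ufd < 0)
 *			return -1;
 *		attr.userns_fd = ufd;
 *		if (syscall(SYS_mount_setattr, mntfd, "", AT_EMPTY_PATH,
 *			    &attr, sizeof(attr)) < 0)
 *			return -1;
 *		return syscall(SYS_move_mount, mntfd, "", AT_FDCWD, dst,
 *			       MOVE_MOUNT_F_EMPTY_PATH);
 *	}
 */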
4577 
4578 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4579 			     struct mount_kattr *kattr, unsigned int flags)
4580 {
4581 	unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4582 
4583 	if (flags & AT_NO_AUTOMOUNT)
4584 		lookup_flags &= ~LOOKUP_AUTOMOUNT;
4585 	if (flags & AT_SYMLINK_NOFOLLOW)
4586 		lookup_flags &= ~LOOKUP_FOLLOW;
4587 	if (flags & AT_EMPTY_PATH)
4588 		lookup_flags |= LOOKUP_EMPTY;
4589 
4590 	*kattr = (struct mount_kattr) {
4591 		.lookup_flags	= lookup_flags,
4592 		.recurse	= !!(flags & AT_RECURSIVE),
4593 	};
4594 
4595 	if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4596 		return -EINVAL;
4597 	if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4598 		return -EINVAL;
4599 	kattr->propagation = attr->propagation;
4600 
4601 	if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4602 		return -EINVAL;
4603 
4604 	kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4605 	kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4606 
4607 	/*
4608 	 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4609 	 * users wanting to transition to a different atime setting cannot
4610 	 * simply specify the atime setting in @attr_set, but must also
4611 	 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4612 	 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4613 	 * @attr_clr and that @attr_set can't have any atime bits set if
4614 	 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4615 	 */
4616 	if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4617 		if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4618 			return -EINVAL;
4619 
4620 		/*
4621 		 * Clear all previous time settings as they are mutually
4622 		 * exclusive.
4623 		 */
4624 		kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4625 		switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4626 		case MOUNT_ATTR_RELATIME:
4627 			kattr->attr_set |= MNT_RELATIME;
4628 			break;
4629 		case MOUNT_ATTR_NOATIME:
4630 			kattr->attr_set |= MNT_NOATIME;
4631 			break;
4632 		case MOUNT_ATTR_STRICTATIME:
4633 			break;
4634 		default:
4635 			return -EINVAL;
4636 		}
4637 	} else {
4638 		if (attr->attr_set & MOUNT_ATTR__ATIME)
4639 			return -EINVAL;
4640 	}
4641 
4642 	return build_mount_idmapped(attr, usize, kattr, flags);
4643 }
4644 
4645 static void finish_mount_kattr(struct mount_kattr *kattr)
4646 {
4647 	put_user_ns(kattr->mnt_userns);
4648 	kattr->mnt_userns = NULL;
4649 
4650 	if (kattr->mnt_idmap)
4651 		mnt_idmap_put(kattr->mnt_idmap);
4652 }
4653 
4654 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4655 		unsigned int, flags, struct mount_attr __user *, uattr,
4656 		size_t, usize)
4657 {
4658 	int err;
4659 	struct path target;
4660 	struct mount_attr attr;
4661 	struct mount_kattr kattr;
4662 
4663 	BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4664 
4665 	if (flags & ~(AT_EMPTY_PATH |
4666 		      AT_RECURSIVE |
4667 		      AT_SYMLINK_NOFOLLOW |
4668 		      AT_NO_AUTOMOUNT))
4669 		return -EINVAL;
4670 
4671 	if (unlikely(usize > PAGE_SIZE))
4672 		return -E2BIG;
4673 	if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4674 		return -EINVAL;
4675 
4676 	if (!may_mount())
4677 		return -EPERM;
4678 
4679 	err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4680 	if (err)
4681 		return err;
4682 
4683 	/* Don't bother walking through the mounts if this is a nop. */
4684 	if (attr.attr_set == 0 &&
4685 	    attr.attr_clr == 0 &&
4686 	    attr.propagation == 0)
4687 		return 0;
4688 
4689 	err = build_mount_kattr(&attr, usize, &kattr, flags);
4690 	if (err)
4691 		return err;
4692 
4693 	err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4694 	if (!err) {
4695 		err = do_mount_setattr(&target, &kattr);
4696 		path_put(&target);
4697 	}
4698 	finish_mount_kattr(&kattr);
4699 	return err;
4700 }
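
/*
 * Illustrative userspace sketch of a plain mount_setattr() call walking
 * the path above: recursively make a subtree read-only and nosuid.  The
 * path argument is a placeholder; AT_RECURSIVE lives in <linux/fcntl.h>,
 * hence the fallback define.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	#ifndef AT_RECURSIVE
 *	#define AT_RECURSIVE 0x8000
 *	#endif
 *
 *	int harden_subtree(const char *path)
 *	{
 *		struct mount_attr attr = {
 *			.attr_set = MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID,
 *		};
 *
 *		return syscall(SYS_mount_setattr, AT_FDCWD, path,
 *			       AT_RECURSIVE, &attr, sizeof(attr));
 *	}
 */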
4701 
4702 static void __init init_mount_tree(void)
4703 {
4704 	struct vfsmount *mnt;
4705 	struct mount *m;
4706 	struct mnt_namespace *ns;
4707 	struct path root;
4708 
4709 	mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
4710 	if (IS_ERR(mnt))
4711 		panic("Can't create rootfs");
4712 
4713 	ns = alloc_mnt_ns(&init_user_ns, false);
4714 	if (IS_ERR(ns))
4715 		panic("Can't allocate initial namespace");
4716 	m = real_mount(mnt);
4717 	m->mnt_ns = ns;
4718 	ns->root = m;
4719 	ns->mounts = 1;
4720 	list_add(&m->mnt_list, &ns->list);
4721 	init_task.nsproxy->mnt_ns = ns;
4722 	get_mnt_ns(ns);
4723 
4724 	root.mnt = mnt;
4725 	root.dentry = mnt->mnt_root;
4726 	mnt->mnt_flags |= MNT_LOCKED;
4727 
4728 	set_fs_pwd(current->fs, &root);
4729 	set_fs_root(current->fs, &root);
4730 }
4731 
4732 void __init mnt_init(void)
4733 {
4734 	int err;
4735 
4736 	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
4737 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
4738 
4739 	mount_hashtable = alloc_large_system_hash("Mount-cache",
4740 				sizeof(struct hlist_head),
4741 				mhash_entries, 19,
4742 				HASH_ZERO,
4743 				&m_hash_shift, &m_hash_mask, 0, 0);
4744 	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
4745 				sizeof(struct hlist_head),
4746 				mphash_entries, 19,
4747 				HASH_ZERO,
4748 				&mp_hash_shift, &mp_hash_mask, 0, 0);
4749 
4750 	if (!mount_hashtable || !mountpoint_hashtable)
4751 		panic("Failed to allocate mount hash table\n");
4752 
4753 	kernfs_init();
4754 
4755 	err = sysfs_init();
4756 	if (err)
4757 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
4758 			__func__, err);
4759 	fs_kobj = kobject_create_and_add("fs", NULL);
4760 	if (!fs_kobj)
4761 		printk(KERN_WARNING "%s: kobj create error\n", __func__);
4762 	shmem_init();
4763 	init_rootfs();
4764 	init_mount_tree();
4765 }
4766 
4767 void put_mnt_ns(struct mnt_namespace *ns)
4768 {
4769 	if (!refcount_dec_and_test(&ns->ns.count))
4770 		return;
4771 	drop_collected_mounts(&ns->root->mnt);
4772 	free_mnt_ns(ns);
4773 }
4774 
4775 struct vfsmount *kern_mount(struct file_system_type *type)
4776 {
4777 	struct vfsmount *mnt;
4778 	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
4779 	if (!IS_ERR(mnt)) {
4780 		/*
4781 		 * This is a long-term mount; don't release mnt until it is
4782 		 * explicitly unmounted, before the filesystem type is unregistered.
4783 		 */
4784 		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
4785 	}
4786 	return mnt;
4787 }
4788 EXPORT_SYMBOL_GPL(kern_mount);
4789 
4790 void kern_unmount(struct vfsmount *mnt)
4791 {
4792 	/* release long term mount so mount point can be released */
4793 	if (!IS_ERR(mnt)) {
4794 		mnt_make_shortterm(mnt);
4795 		synchronize_rcu();	/* yecchhh... */
4796 		mntput(mnt);
4797 	}
4798 }
4799 EXPORT_SYMBOL(kern_unmount);
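
/*
 * Hedged sketch of the usual in-kernel pairing of kern_mount() and
 * kern_unmount(); the filesystem type and identifiers are hypothetical.
 * Pseudo filesystems pin such a long-term mount at init time and drop it
 * on exit, before unregistering the filesystem type.
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount(&example_fs_type);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		kern_unmount(example_mnt);
 *	}
 */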
4800 
4801 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
4802 {
4803 	unsigned int i;
4804 
4805 	for (i = 0; i < num; i++)
4806 		mnt_make_shortterm(mnt[i]);
4807 	synchronize_rcu_expedited();
4808 	for (i = 0; i < num; i++)
4809 		mntput(mnt[i]);
4810 }
4811 EXPORT_SYMBOL(kern_unmount_array);
4812 
4813 bool our_mnt(struct vfsmount *mnt)
4814 {
4815 	return check_mnt(real_mount(mnt));
4816 }
4817 
4818 bool current_chrooted(void)
4819 {
4820 	/* Does the current process have a non-standard root? */
4821 	struct path ns_root;
4822 	struct path fs_root;
4823 	bool chrooted;
4824 
4825 	/* Find the namespace root */
4826 	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
4827 	ns_root.dentry = ns_root.mnt->mnt_root;
4828 	path_get(&ns_root);
4829 	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
4830 		;
4831 
4832 	get_fs_root(current->fs, &fs_root);
4833 
4834 	chrooted = !path_equal(&fs_root, &ns_root);
4835 
4836 	path_put(&fs_root);
4837 	path_put(&ns_root);
4838 
4839 	return chrooted;
4840 }
4841 
4842 static bool mnt_already_visible(struct mnt_namespace *ns,
4843 				const struct super_block *sb,
4844 				int *new_mnt_flags)
4845 {
4846 	int new_flags = *new_mnt_flags;
4847 	struct mount *mnt;
4848 	bool visible = false;
4849 
4850 	down_read(&namespace_sem);
4851 	lock_ns_list(ns);
4852 	list_for_each_entry(mnt, &ns->list, mnt_list) {
4853 		struct mount *child;
4854 		int mnt_flags;
4855 
4856 		if (mnt_is_cursor(mnt))
4857 			continue;
4858 
4859 		if (mnt->mnt.mnt_sb->s_type != sb->s_type)
4860 			continue;
4861 
4862 		/* This mount is not fully visible if its root directory
4863 		 * is not the root directory of the filesystem.
4864 		 */
4865 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
4866 			continue;
4867 
4868 		/* A local view of the mount flags */
4869 		mnt_flags = mnt->mnt.mnt_flags;
4870 
4871 		/* Don't miss readonly hidden in the superblock flags */
4872 		if (sb_rdonly(mnt->mnt.mnt_sb))
4873 			mnt_flags |= MNT_LOCK_READONLY;
4874 
4875 		/* Verify the mount flags are equal to or more permissive
4876 		 * than the proposed new mount.
4877 		 */
4878 		if ((mnt_flags & MNT_LOCK_READONLY) &&
4879 		    !(new_flags & MNT_READONLY))
4880 			continue;
4881 		if ((mnt_flags & MNT_LOCK_ATIME) &&
4882 		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
4883 			continue;
4884 
4885 		/* This mount is not fully visible if there are any
4886 		 * locked child mounts that cover anything except for
4887 		 * empty directories.
4888 		 */
4889 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4890 			struct inode *inode = child->mnt_mountpoint->d_inode;
4891 			/* Only worry about locked mounts */
4892 			if (!(child->mnt.mnt_flags & MNT_LOCKED))
4893 				continue;
4894 			/* Is the directory permanently empty? */
4895 			if (!is_empty_dir_inode(inode))
4896 				goto next;
4897 		}
4898 		/* Preserve the locked attributes */
4899 		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
4900 					       MNT_LOCK_ATIME);
4901 		visible = true;
4902 		goto found;
4903 	next:	;
4904 	}
4905 found:
4906 	unlock_ns_list(ns);
4907 	up_read(&namespace_sem);
4908 	return visible;
4909 }
4910 
4911 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
4912 {
4913 	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
4914 	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
4915 	unsigned long s_iflags;
4916 
4917 	if (ns->user_ns == &init_user_ns)
4918 		return false;
4919 
4920 	/* Can this filesystem be too revealing? */
4921 	s_iflags = sb->s_iflags;
4922 	if (!(s_iflags & SB_I_USERNS_VISIBLE))
4923 		return false;
4924 
4925 	if ((s_iflags & required_iflags) != required_iflags) {
4926 		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
4927 			  required_iflags);
4928 		return true;
4929 	}
4930 
4931 	return !mnt_already_visible(ns, sb, new_mnt_flags);
4932 }
4933 
4934 bool mnt_may_suid(struct vfsmount *mnt)
4935 {
4936 	/*
4937 	 * Foreign mounts (accessed via fchdir or through /proc
4938 	 * symlinks) are always treated as if they are nosuid.  This
4939 	 * prevents namespaces from trusting potentially unsafe
4940 	 * suid/sgid bits, file caps, or security labels that originate
4941 	 * in other namespaces.
4942 	 */
4943 	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4944 	       current_in_userns(mnt->mnt_sb->s_user_ns);
4945 }
4946 
4947 static struct ns_common *mntns_get(struct task_struct *task)
4948 {
4949 	struct ns_common *ns = NULL;
4950 	struct nsproxy *nsproxy;
4951 
4952 	task_lock(task);
4953 	nsproxy = task->nsproxy;
4954 	if (nsproxy) {
4955 		ns = &nsproxy->mnt_ns->ns;
4956 		get_mnt_ns(to_mnt_ns(ns));
4957 	}
4958 	task_unlock(task);
4959 
4960 	return ns;
4961 }
4962 
4963 static void mntns_put(struct ns_common *ns)
4964 {
4965 	put_mnt_ns(to_mnt_ns(ns));
4966 }
4967 
4968 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
4969 {
4970 	struct nsproxy *nsproxy = nsset->nsproxy;
4971 	struct fs_struct *fs = nsset->fs;
4972 	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
4973 	struct user_namespace *user_ns = nsset->cred->user_ns;
4974 	struct path root;
4975 	int err;
4976 
4977 	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
4978 	    !ns_capable(user_ns, CAP_SYS_CHROOT) ||
4979 	    !ns_capable(user_ns, CAP_SYS_ADMIN))
4980 		return -EPERM;
4981 
4982 	if (is_anon_ns(mnt_ns))
4983 		return -EINVAL;
4984 
4985 	if (fs->users != 1)
4986 		return -EINVAL;
4987 
4988 	get_mnt_ns(mnt_ns);
4989 	old_mnt_ns = nsproxy->mnt_ns;
4990 	nsproxy->mnt_ns = mnt_ns;
4991 
4992 	/* Find the root */
4993 	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
4994 				"/", LOOKUP_DOWN, &root);
4995 	if (err) {
4996 		/* revert to old namespace */
4997 		nsproxy->mnt_ns = old_mnt_ns;
4998 		put_mnt_ns(mnt_ns);
4999 		return err;
5000 	}
5001 
5002 	put_mnt_ns(old_mnt_ns);
5003 
5004 	/* Update the pwd and root */
5005 	set_fs_pwd(fs, &root);
5006 	set_fs_root(fs, &root);
5007 
5008 	path_put(&root);
5009 	return 0;
5010 }
5011 
5012 static struct user_namespace *mntns_owner(struct ns_common *ns)
5013 {
5014 	return to_mnt_ns(ns)->user_ns;
5015 }
5016 
5017 const struct proc_ns_operations mntns_operations = {
5018 	.name		= "mnt",
5019 	.type		= CLONE_NEWNS,
5020 	.get		= mntns_get,
5021 	.put		= mntns_put,
5022 	.install	= mntns_install,
5023 	.owner		= mntns_owner,
5024 };
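
/*
 * Illustrative userspace sketch of entering an existing mount namespace,
 * which exercises mntns_install() above via setns(2).  The pid argument
 * is a placeholder; per the checks above the caller needs CAP_SYS_ADMIN
 * (and CAP_SYS_CHROOT) and must not share its fs_struct, i.e. it should
 * be single-threaded.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/types.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int join_mnt_ns(pid_t pid)
 *	{
 *		char path[64];
 *		int fd, ret;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/ns/mnt", (int)pid);
 *		fd = open(path, O_RDONLY | O_CLOEXEC);
 *		if (fd < 0)
 *			return -1;
 *		ret = setns(fd, CLONE_NEWNS);	// -> mntns_install()
 *		close(fd);
 *		return ret;
 *	}
 */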
5025 
5026 #ifdef CONFIG_SYSCTL
5027 static struct ctl_table fs_namespace_sysctls[] = {
5028 	{
5029 		.procname	= "mount-max",
5030 		.data		= &sysctl_mount_max,
5031 		.maxlen		= sizeof(unsigned int),
5032 		.mode		= 0644,
5033 		.proc_handler	= proc_dointvec_minmax,
5034 		.extra1		= SYSCTL_ONE,
5035 	},
5036 	{ }
5037 };
5038 
5039 static int __init init_fs_namespace_sysctls(void)
5040 {
5041 	register_sysctl_init("fs", fs_namespace_sysctls);
5042 	return 0;
5043 }
5044 fs_initcall(init_fs_namespace_sysctls);
5045 
5046 #endif /* CONFIG_SYSCTL */
5047