xref: /openbmc/linux/fs/namespace.c (revision d3597236)
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include "pnode.h"
#include "internal.h"

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
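/*
 * Example (illustrative, not from this file): the two __setup() handlers
 * above consume kernel command-line parameters, so booting with e.g.
 *
 *	mhash_entries=8192 mphash_entries=4096
 *
 * pre-sizes the mount and mountpoint hash tables when they are allocated
 * at boot; without the parameters the sizes are typically scaled from the
 * amount of available memory.
 */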

static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
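/*
 * Note on the two hash functions above: dividing the pointers by
 * L1_CACHE_BYTES discards the low address bits, which carry little
 * entropy since the hashed objects are roughly cache-line aligned;
 * adding (tmp >> hash_shift) folds the high bits into the low ones
 * before the value is masked down to a table index, so two objects
 * whose addresses differ only in the top bits still tend to land in
 * different buckets.
 */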

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
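/*
 * The loop above is the classic two-step IDA idiom (predating
 * ida_simple_get()/ida_alloc()): ida_pre_get() preallocates memory
 * outside the spinlock, and -EAGAIN from ida_get_new_above() means a
 * racing allocator consumed the preallocated node, so we simply retry.
 */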

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*.  This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store from mnt_inc_writers() must be visible before we enter
	 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success.  When the write operation is
 * finished, mnt_drop_write() must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
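/*
 * Usage sketch (illustrative caller, not part of this file): every
 * mnt_want_write() must be paired with mnt_drop_write() once the
 * modification is done, e.g.
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = vfs_unlink(dir, dentry, NULL);
 *	mnt_drop_write(path->mnt);
 *
 * so the writer count and the sb_start_write() freeze protection are
 * both released even when the actual write fails.
 */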

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * When finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
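/*
 * Usage sketch (illustrative, not from this file): ioctl handlers that
 * modify the filesystem typically bracket the change with the *_file
 * variants, which reuse the write reference of an already-writable file:
 *
 *	err = mnt_want_write_file(filp);
 *	if (err)
 *		return err;
 *	...update the inode...
 *	mnt_drop_write_file(filp);
 */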

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}
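/*
 * Timeline of the handshake above against a concurrent __mnt_want_write()
 * (descriptive summary, not code from this file):
 *
 *	remount-r/o CPU			writer CPU
 *	---------------			----------
 *	set MNT_WRITE_HOLD		mnt_inc_writers()
 *	smp_mb()			smp_mb()
 *	read writer counters		spin while MNT_WRITE_HOLD is set
 *	set MNT_READONLY
 *	smp_wmb()
 *	clear MNT_WRITE_HOLD		smp_rmb(); see MNT_READONLY, back off
 */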

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct mount *p, *res = NULL;
	p = __lookup_mnt(mnt, dentry);
	if (!p)
		goto out;
	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
		res = p;
	hlist_for_each_entry_continue(p, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
			res = p;
	}
out:
	return res;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
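/*
 * Usage sketch (illustrative): callers step down onto whatever is mounted
 * at a path and must drop the reference lookup_mnt() took, e.g.
 *
 *	struct vfsmount *m = lookup_mnt(&path);
 *	if (m) {
 *		...use the child mount...
 *		mntput(m);
 *	}
 *
 * The legitimize_mnt() loop above is what makes the lockless hash walk
 * safe: the reference is kept only if the mount_lock seqcount is unchanged.
 */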

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in this context because we
 * need to identify all mounts that may be in the current mount
 * namespace, not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	INIT_HLIST_HEAD(&mp->m_list);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

static void attach_shadowed(struct mount *mnt,
			struct mount *parent,
			struct mount *shadows)
{
	if (shadows) {
		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
		list_add(&mnt->mnt_child, &shadows->mnt_child);
	} else {
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	attach_shadowed(mnt, parent, shadows);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
	}
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
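/*
 * Refcounting sketch (illustrative): mntget()/mntput() pair just like
 * dget()/dput(), e.g. when stashing a mount beyond the caller's lifetime:
 *
 *	struct vfsmount *m = mntget(path->mnt);
 *	...use m...
 *	mntput(m);
 *
 * mntput() only tears the mount down once it is no longer in any
 * namespace (mnt_ns == NULL) and the count drops to zero.
 */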

struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
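/*
 * Wiring sketch (hypothetical filesystem, illustrative only): a simple
 * filesystem records its option string at mount time and lets
 * generic_show_options() reproduce it in /proc/mounts:
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		save_mount_options(sb, data);
 *		...
 *	}
 *
 *	static const struct super_operations examplefs_ops = {
 *		.show_options	= generic_show_options,
 *		...
 *	};
 */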

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = !(((how & UMOUNT_CONNECTED) &&
				mnt_has_parent(p) &&
				(p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
			       IS_MNT_LOCKED_AND_LAZY(p));

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	lock_mount_hash();
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			struct mount *p, *tmp;
			list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
				hlist_add_head(&p->mnt_umount.s_list, &unmounted);
				umount_mnt(p);
			}
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	unlock_mount_hash();
	put_mountpoint(mp);
out_unlock:
	namespace_unlock();
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}
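/*
 * Userspace view (illustrative): this syscall backs umount(2)/umount2(2),
 * so for example
 *
 *	umount2("/mnt", MNT_DETACH);
 *
 * detaches the tree immediately and lets do_umount() above clean it up
 * lazily, while a plain umount("/mnt") takes the synchronous UMOUNT_SYNC
 * path and fails with -EBUSY if the mount is busy.
 */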

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			struct mount *t = NULL;
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			mnt_set_mountpoint(parent, p->mnt_mp, q);
			if (!list_empty(&parent->mnt_mounts)) {
				t = list_last_entry(&parent->mnt_mounts,
					struct mount, mnt_child);
				if (t->mnt_mp != p->mnt_mp)
					t = NULL;
			}
			attach_shadowed(q, parent, t);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}
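/*
 * Usage sketch (illustrative; the audit subsystem is the in-tree user):
 * collect_mounts() snapshots a private copy of a whole mount subtree,
 * which must later be released with drop_collected_mounts():
 *
 *	struct vfsmount *snapshot = collect_mounts(&path);
 *	if (IS_ERR(snapshot))
 *		return PTR_ERR(snapshot);
 *	...walk the snapshot, e.g. with iterate_mounts() below...
 *	drop_collected_mounts(snapshot);
 */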

/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new
 * mount will not be attached anywhere in the namespace and will be private
 * (i.e. changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	down_read(&namespace_sem);
	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
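/*
 * Usage sketch (illustrative): a caller that needs a detached, private
 * view of a subtree, e.g. for internal kernel I/O on the files below it:
 *
 *	struct vfsmount *m = clone_private_mount(&path);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...access files relative to m->mnt_root...
 *	mntput(m);
 */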

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
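/*
 * Callback sketch (hypothetical helper, illustrative only): iterating a
 * tree returned by collect_mounts() to count its mounts; a non-zero
 * return from the callback stops the walk early:
 *
 *	static int count_one(struct vfsmount *mnt, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	iterate_mounts(count_one, &n, snapshot);
 */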

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place at which the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
1873 static int attach_recursive_mnt(struct mount *source_mnt,
1874 			struct mount *dest_mnt,
1875 			struct mountpoint *dest_mp,
1876 			struct path *parent_path)
1877 {
1878 	HLIST_HEAD(tree_list);
1879 	struct mount *child, *p;
1880 	struct hlist_node *n;
1881 	int err;
1882 
1883 	if (IS_MNT_SHARED(dest_mnt)) {
1884 		err = invent_group_ids(source_mnt, true);
1885 		if (err)
1886 			goto out;
1887 		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1888 		lock_mount_hash();
1889 		if (err)
1890 			goto out_cleanup_ids;
1891 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1892 			set_mnt_shared(p);
1893 	} else {
1894 		lock_mount_hash();
1895 	}
1896 	if (parent_path) {
1897 		detach_mnt(source_mnt, parent_path);
1898 		attach_mnt(source_mnt, dest_mnt, dest_mp);
1899 		touch_mnt_namespace(source_mnt->mnt_ns);
1900 	} else {
1901 		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
1902 		commit_tree(source_mnt, NULL);
1903 	}
1904 
1905 	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
1906 		struct mount *q;
1907 		hlist_del_init(&child->mnt_hash);
1908 		q = __lookup_mnt_last(&child->mnt_parent->mnt,
1909 				      child->mnt_mountpoint);
1910 		commit_tree(child, q);
1911 	}
1912 	unlock_mount_hash();
1913 
1914 	return 0;
1915 
1916  out_cleanup_ids:
1917 	while (!hlist_empty(&tree_list)) {
1918 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
1919 		umount_tree(child, UMOUNT_SYNC);
1920 	}
1921 	unlock_mount_hash();
1922 	cleanup_group_ids(source_mnt, NULL);
1923  out:
1924 	return err;
1925 }
1926 
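/*
 * Illustrative userspace sketch of the bind rows in the tables above (not
 * part of this file; "/mnt" and "/srv" are hypothetical paths and /mnt is
 * assumed to already be a mount point).  Once /mnt is marked shared, the
 * clone created by the bind is itself shared and, because the destination
 * is shared, the new mount also appears in every peer of /mnt, exactly as
 * attach_recursive_mnt() arranges via propagate_mnt():
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL))
 *			perror("make-shared /mnt");
 *		if (mount("/srv", "/mnt/srv", NULL, MS_BIND, NULL))
 *			perror("bind /srv");
 *		return 0;
 *	}
 */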
1927 static struct mountpoint *lock_mount(struct path *path)
1928 {
1929 	struct vfsmount *mnt;
1930 	struct dentry *dentry = path->dentry;
1931 retry:
1932 	mutex_lock(&dentry->d_inode->i_mutex);
1933 	if (unlikely(cant_mount(dentry))) {
1934 		mutex_unlock(&dentry->d_inode->i_mutex);
1935 		return ERR_PTR(-ENOENT);
1936 	}
1937 	namespace_lock();
1938 	mnt = lookup_mnt(path);
1939 	if (likely(!mnt)) {
1940 		struct mountpoint *mp = lookup_mountpoint(dentry);
1941 		if (!mp)
1942 			mp = new_mountpoint(dentry);
1943 		if (IS_ERR(mp)) {
1944 			namespace_unlock();
1945 			mutex_unlock(&dentry->d_inode->i_mutex);
1946 			return mp;
1947 		}
1948 		return mp;
1949 	}
1950 	namespace_unlock();
1951 	mutex_unlock(&path->dentry->d_inode->i_mutex);
1952 	path_put(path);
1953 	path->mnt = mnt;
1954 	dentry = path->dentry = dget(mnt->mnt_root);
1955 	goto retry;
1956 }
1957 
1958 static void unlock_mount(struct mountpoint *where)
1959 {
1960 	struct dentry *dentry = where->m_dentry;
1961 	put_mountpoint(where);
1962 	namespace_unlock();
1963 	mutex_unlock(&dentry->d_inode->i_mutex);
1964 }
1965 
1966 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
1967 {
1968 	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
1969 		return -EINVAL;
1970 
1971 	if (d_is_dir(mp->m_dentry) !=
1972 	      d_is_dir(mnt->mnt.mnt_root))
1973 		return -ENOTDIR;
1974 
1975 	return attach_recursive_mnt(mnt, p, mp, NULL);
1976 }
1977 
1978 /*
1979  * Sanity check the flags to change_mnt_propagation.
1980  */
1981 
1982 static int flags_to_propagation_type(int flags)
1983 {
1984 	int type = flags & ~(MS_REC | MS_SILENT);
1985 
1986 	/* Fail if any non-propagation flags are set */
1987 	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
1988 		return 0;
1989 	/* Only one propagation flag should be set */
1990 	if (!is_power_of_2(type))
1991 		return 0;
1992 	return type;
1993 }
1994 
1995 /*
1996  * recursively change the type of the mountpoint.
1997  */
1998 static int do_change_type(struct path *path, int flag)
1999 {
2000 	struct mount *m;
2001 	struct mount *mnt = real_mount(path->mnt);
2002 	int recurse = flag & MS_REC;
2003 	int type;
2004 	int err = 0;
2005 
2006 	if (path->dentry != path->mnt->mnt_root)
2007 		return -EINVAL;
2008 
2009 	type = flags_to_propagation_type(flag);
2010 	if (!type)
2011 		return -EINVAL;
2012 
2013 	namespace_lock();
2014 	if (type == MS_SHARED) {
2015 		err = invent_group_ids(mnt, recurse);
2016 		if (err)
2017 			goto out_unlock;
2018 	}
2019 
2020 	lock_mount_hash();
2021 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2022 		change_mnt_propagation(m, type);
2023 	unlock_mount_hash();
2024 
2025  out_unlock:
2026 	namespace_unlock();
2027 	return err;
2028 }
2029 
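/*
 * Illustrative userspace counterpart of the two functions above (paths
 * hypothetical; the target must be the root of a mount).  Exactly one
 * propagation type may be requested per call, optionally combined with
 * MS_REC to apply it to the whole subtree:
 *
 *	mount(NULL, "/mnt", NULL, MS_PRIVATE, NULL);	    // this mount only
 *	mount(NULL, "/mnt", NULL, MS_SLAVE | MS_REC, NULL); // whole subtree
 *
 * whereas combining two types fails flags_to_propagation_type():
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_SLAVE, NULL);	// -EINVAL
 */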
2030 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2031 {
2032 	struct mount *child;
2033 	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2034 		if (!is_subdir(child->mnt_mountpoint, dentry))
2035 			continue;
2036 
2037 		if (child->mnt.mnt_flags & MNT_LOCKED)
2038 			return true;
2039 	}
2040 	return false;
2041 }
2042 
2043 /*
2044  * do loopback mount.
2045  */
2046 static int do_loopback(struct path *path, const char *old_name,
2047 				int recurse)
2048 {
2049 	struct path old_path;
2050 	struct mount *mnt = NULL, *old, *parent;
2051 	struct mountpoint *mp;
2052 	int err;
2053 	if (!old_name || !*old_name)
2054 		return -EINVAL;
2055 	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2056 	if (err)
2057 		return err;
2058 
2059 	err = -EINVAL;
2060 	if (mnt_ns_loop(old_path.dentry))
2061 		goto out;
2062 
2063 	mp = lock_mount(path);
2064 	err = PTR_ERR(mp);
2065 	if (IS_ERR(mp))
2066 		goto out;
2067 
2068 	old = real_mount(old_path.mnt);
2069 	parent = real_mount(path->mnt);
2070 
2071 	err = -EINVAL;
2072 	if (IS_MNT_UNBINDABLE(old))
2073 		goto out2;
2074 
2075 	if (!check_mnt(parent))
2076 		goto out2;
2077 
2078 	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
2079 		goto out2;
2080 
2081 	if (!recurse && has_locked_children(old, old_path.dentry))
2082 		goto out2;
2083 
2084 	if (recurse)
2085 		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
2086 	else
2087 		mnt = clone_mnt(old, old_path.dentry, 0);
2088 
2089 	if (IS_ERR(mnt)) {
2090 		err = PTR_ERR(mnt);
2091 		goto out2;
2092 	}
2093 
2094 	mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2095 
2096 	err = graft_tree(mnt, parent, mp);
2097 	if (err) {
2098 		lock_mount_hash();
2099 		umount_tree(mnt, UMOUNT_SYNC);
2100 		unlock_mount_hash();
2101 	}
2102 out2:
2103 	unlock_mount(mp);
2104 out:
2105 	path_put(&old_path);
2106 	return err;
2107 }
2108 
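/*
 * Illustrative userspace counterpart of do_loopback() (paths hypothetical).
 * A plain bind clones only the mount containing the source path, while
 * MS_REC copies the whole tree below it; a source with locked children can
 * only be bound recursively, per the has_locked_children() check above:
 *
 *	mount("/a", "/b", NULL, MS_BIND, NULL);		 // /a alone, submounts skipped
 *	mount("/a", "/b", NULL, MS_BIND | MS_REC, NULL); // /a plus everything under it
 */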
2109 static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
2110 {
2111 	int error = 0;
2112 	int readonly_request = 0;
2113 
2114 	if (ms_flags & MS_RDONLY)
2115 		readonly_request = 1;
2116 	if (readonly_request == __mnt_is_readonly(mnt))
2117 		return 0;
2118 
2119 	if (readonly_request)
2120 		error = mnt_make_readonly(real_mount(mnt));
2121 	else
2122 		__mnt_unmake_readonly(real_mount(mnt));
2123 	return error;
2124 }
2125 
2126 /*
2127  * change filesystem flags. dir should be the physical root of the filesystem.
2128  * If you've mounted a non-root directory somewhere and want to do remount
2129  * on it - tough luck.
2130  */
2131 static int do_remount(struct path *path, int flags, int mnt_flags,
2132 		      void *data)
2133 {
2134 	int err;
2135 	struct super_block *sb = path->mnt->mnt_sb;
2136 	struct mount *mnt = real_mount(path->mnt);
2137 
2138 	if (!check_mnt(mnt))
2139 		return -EINVAL;
2140 
2141 	if (path->dentry != path->mnt->mnt_root)
2142 		return -EINVAL;
2143 
2144 	/* Don't allow changing of locked mnt flags.
2145 	 *
2146 	 * No locks need to be held here while testing the various
2147 	 * MNT_LOCK flags because those flags can never be cleared
2148 	 * once they are set.
2149 	 */
2150 	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
2151 	    !(mnt_flags & MNT_READONLY)) {
2152 		return -EPERM;
2153 	}
2154 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
2155 	    !(mnt_flags & MNT_NODEV)) {
2156 		/* Was the nodev implicitly added in mount? */
2157 		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
2158 		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2159 			mnt_flags |= MNT_NODEV;
2160 		} else {
2161 			return -EPERM;
2162 		}
2163 	}
2164 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
2165 	    !(mnt_flags & MNT_NOSUID)) {
2166 		return -EPERM;
2167 	}
2168 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
2169 	    !(mnt_flags & MNT_NOEXEC)) {
2170 		return -EPERM;
2171 	}
2172 	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
2173 	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
2174 		return -EPERM;
2175 	}
2176 
2177 	err = security_sb_remount(sb, data);
2178 	if (err)
2179 		return err;
2180 
2181 	down_write(&sb->s_umount);
2182 	if (flags & MS_BIND)
2183 		err = change_mount_flags(path->mnt, flags);
2184 	else if (!capable(CAP_SYS_ADMIN))
2185 		err = -EPERM;
2186 	else
2187 		err = do_remount_sb(sb, flags, data, 0);
2188 	if (!err) {
2189 		lock_mount_hash();
2190 		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2191 		mnt->mnt.mnt_flags = mnt_flags;
2192 		touch_mnt_namespace(mnt->mnt_ns);
2193 		unlock_mount_hash();
2194 	}
2195 	up_write(&sb->s_umount);
2196 	return err;
2197 }
2198 
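/*
 * Illustrative userspace counterpart of do_remount() (path hypothetical).
 * Adding MS_BIND routes the call through change_mount_flags() above and
 * touches only this mountpoint; without it, do_remount_sb() changes the
 * underlying superblock, affecting every mount of that filesystem:
 *
 *	// per-mountpoint: make just this vfsmount read-only
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 *
 *	// per-superblock: remount the filesystem itself read-only
 *	mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 */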
2199 static inline int tree_contains_unbindable(struct mount *mnt)
2200 {
2201 	struct mount *p;
2202 	for (p = mnt; p; p = next_mnt(p, mnt)) {
2203 		if (IS_MNT_UNBINDABLE(p))
2204 			return 1;
2205 	}
2206 	return 0;
2207 }
2208 
2209 static int do_move_mount(struct path *path, const char *old_name)
2210 {
2211 	struct path old_path, parent_path;
2212 	struct mount *p;
2213 	struct mount *old;
2214 	struct mountpoint *mp;
2215 	int err;
2216 	if (!old_name || !*old_name)
2217 		return -EINVAL;
2218 	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2219 	if (err)
2220 		return err;
2221 
2222 	mp = lock_mount(path);
2223 	err = PTR_ERR(mp);
2224 	if (IS_ERR(mp))
2225 		goto out;
2226 
2227 	old = real_mount(old_path.mnt);
2228 	p = real_mount(path->mnt);
2229 
2230 	err = -EINVAL;
2231 	if (!check_mnt(p) || !check_mnt(old))
2232 		goto out1;
2233 
2234 	if (old->mnt.mnt_flags & MNT_LOCKED)
2235 		goto out1;
2236 
2237 	err = -EINVAL;
2238 	if (old_path.dentry != old_path.mnt->mnt_root)
2239 		goto out1;
2240 
2241 	if (!mnt_has_parent(old))
2242 		goto out1;
2243 
2244 	if (d_is_dir(path->dentry) !=
2245 	      d_is_dir(old_path.dentry))
2246 		goto out1;
2247 	/*
2248 	 * Don't move a mount residing in a shared parent.
2249 	 */
2250 	if (IS_MNT_SHARED(old->mnt_parent))
2251 		goto out1;
2252 	/*
2253 	 * Don't move a mount tree containing unbindable mounts to a destination
2254 	 * mount which is shared.
2255 	 */
2256 	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2257 		goto out1;
2258 	err = -ELOOP;
2259 	for (; mnt_has_parent(p); p = p->mnt_parent)
2260 		if (p == old)
2261 			goto out1;
2262 
2263 	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
2264 	if (err)
2265 		goto out1;
2266 
2267 	/* if the mount is moved, it should no longer expire
2268 	 * automatically */
2269 	list_del_init(&old->mnt_expire);
2270 out1:
2271 	unlock_mount(mp);
2272 out:
2273 	if (!err)
2274 		path_put(&parent_path);
2275 	path_put(&old_path);
2276 	return err;
2277 }
2278 
2279 static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
2280 {
2281 	int err;
2282 	const char *subtype = strchr(fstype, '.');
2283 	if (subtype) {
2284 		subtype++;
2285 		err = -EINVAL;
2286 		if (!subtype[0])
2287 			goto err;
2288 	} else
2289 		subtype = "";
2290 
2291 	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
2292 	err = -ENOMEM;
2293 	if (!mnt->mnt_sb->s_subtype)
2294 		goto err;
2295 	return mnt;
2296 
2297  err:
2298 	mntput(mnt);
2299 	return ERR_PTR(err);
2300 }
2301 
2302 /*
2303  * add a mount into a namespace's mount tree
2304  */
2305 static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
2306 {
2307 	struct mountpoint *mp;
2308 	struct mount *parent;
2309 	int err;
2310 
2311 	mnt_flags &= ~MNT_INTERNAL_FLAGS;
2312 
2313 	mp = lock_mount(path);
2314 	if (IS_ERR(mp))
2315 		return PTR_ERR(mp);
2316 
2317 	parent = real_mount(path->mnt);
2318 	err = -EINVAL;
2319 	if (unlikely(!check_mnt(parent))) {
2320 		/* that's acceptable only for automounts done in private ns */
2321 		if (!(mnt_flags & MNT_SHRINKABLE))
2322 			goto unlock;
2323 		/* ... and for those we'd better have mountpoint still alive */
2324 		if (!parent->mnt_ns)
2325 			goto unlock;
2326 	}
2327 
2328 	/* Refuse the same filesystem on the same mount point */
2329 	err = -EBUSY;
2330 	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2331 	    path->mnt->mnt_root == path->dentry)
2332 		goto unlock;
2333 
2334 	err = -EINVAL;
2335 	if (d_is_symlink(newmnt->mnt.mnt_root))
2336 		goto unlock;
2337 
2338 	newmnt->mnt.mnt_flags = mnt_flags;
2339 	err = graft_tree(newmnt, parent, mp);
2340 
2341 unlock:
2342 	unlock_mount(mp);
2343 	return err;
2344 }
2345 
2346 static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);
2347 
2348 /*
2349  * create a new mount for userspace and request it to be added into the
2350  * namespace's tree
2351  */
2352 static int do_new_mount(struct path *path, const char *fstype, int flags,
2353 			int mnt_flags, const char *name, void *data)
2354 {
2355 	struct file_system_type *type;
2356 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2357 	struct vfsmount *mnt;
2358 	int err;
2359 
2360 	if (!fstype)
2361 		return -EINVAL;
2362 
2363 	type = get_fs_type(fstype);
2364 	if (!type)
2365 		return -ENODEV;
2366 
2367 	if (user_ns != &init_user_ns) {
2368 		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
2369 			put_filesystem(type);
2370 			return -EPERM;
2371 		}
2372 		/* Only allow devices from mounts created outside the
2373 		 * initial user namespace in special cases.
2374 		 */
2375 		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2376 			flags |= MS_NODEV;
2377 			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
2378 		}
2379 		if (type->fs_flags & FS_USERNS_VISIBLE) {
2380 			if (!fs_fully_visible(type, &mnt_flags)) {
2381 				put_filesystem(type); /* balance get_fs_type() */
2382 				return -EPERM;
2383 			}
2384 		}
2383 	}
2384 
2385 	mnt = vfs_kern_mount(type, flags, name, data);
2386 	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
2387 	    !mnt->mnt_sb->s_subtype)
2388 		mnt = fs_set_subtype(mnt, fstype);
2389 
2390 	put_filesystem(type);
2391 	if (IS_ERR(mnt))
2392 		return PTR_ERR(mnt);
2393 
2394 	err = do_add_mount(real_mount(mnt), path, mnt_flags);
2395 	if (err)
2396 		mntput(mnt);
2397 	return err;
2398 }
2399 
2400 int finish_automount(struct vfsmount *m, struct path *path)
2401 {
2402 	struct mount *mnt = real_mount(m);
2403 	int err;
2404 	/* The new mount record should have at least 2 refs to prevent it being
2405 	 * expired before we get a chance to add it
2406 	 */
2407 	BUG_ON(mnt_get_count(mnt) < 2);
2408 
2409 	if (m->mnt_sb == path->mnt->mnt_sb &&
2410 	    m->mnt_root == path->dentry) {
2411 		err = -ELOOP;
2412 		goto fail;
2413 	}
2414 
2415 	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2416 	if (!err)
2417 		return 0;
2418 fail:
2419 	/* remove m from any expiration list it may be on */
2420 	if (!list_empty(&mnt->mnt_expire)) {
2421 		namespace_lock();
2422 		list_del_init(&mnt->mnt_expire);
2423 		namespace_unlock();
2424 	}
2425 	mntput(m);
2426 	mntput(m);
2427 	return err;
2428 }
2429 
2430 /**
2431  * mnt_set_expiry - Put a mount on an expiration list
2432  * @mnt: The mount to list.
2433  * @expiry_list: The list to add the mount to.
2434  */
2435 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2436 {
2437 	namespace_lock();
2438 
2439 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2440 
2441 	namespace_unlock();
2442 }
2443 EXPORT_SYMBOL(mnt_set_expiry);
2444 
2445 /*
2446  * process a list of expirable mountpoints with the intent of discarding any
2447  * mountpoints that aren't in use and haven't been touched since last we came
2448  * here
2449  */
2450 void mark_mounts_for_expiry(struct list_head *mounts)
2451 {
2452 	struct mount *mnt, *next;
2453 	LIST_HEAD(graveyard);
2454 
2455 	if (list_empty(mounts))
2456 		return;
2457 
2458 	namespace_lock();
2459 	lock_mount_hash();
2460 
2461 	/* extract from the expiration list every vfsmount that matches the
2462 	 * following criteria:
2463 	 * - only referenced by its parent vfsmount
2464 	 * - still marked for expiry (marked on the last call here; marks are
2465 	 *   cleared by mntput())
2466 	 */
2467 	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2468 		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2469 			propagate_mount_busy(mnt, 1))
2470 			continue;
2471 		list_move(&mnt->mnt_expire, &graveyard);
2472 	}
2473 	while (!list_empty(&graveyard)) {
2474 		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2475 		touch_mnt_namespace(mnt->mnt_ns);
2476 		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2477 	}
2478 	unlock_mount_hash();
2479 	namespace_unlock();
2480 }
2481 
2482 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
2483 
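/*
 * A minimal sketch of how a filesystem can drive this interface (the
 * "example_" names are hypothetical; NFS and AFS automounts follow the
 * same pattern): queue each automounted vfsmount with mnt_set_expiry(),
 * then reap from a periodic worker.  A mount survives the first pass
 * (it only gets marked) and is unmounted on the next pass if nothing
 * used it in between, since any mntput() clears the mark.
 *
 *	static LIST_HEAD(example_automount_list);
 *	static void example_expiry_work(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_expiry, example_expiry_work);
 *
 *	static void example_expiry_work(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&example_automount_list);
 *		if (!list_empty(&example_automount_list))
 *			schedule_delayed_work(&example_expiry, 30 * HZ);
 *	}
 */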
2484 /*
2485  * Ripoff of 'select_parent()'
2486  *
2487  * search the list of submounts for a given mountpoint, and move any
2488  * shrinkable submounts to the 'graveyard' list.
2489  */
2490 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2491 {
2492 	struct mount *this_parent = parent;
2493 	struct list_head *next;
2494 	int found = 0;
2495 
2496 repeat:
2497 	next = this_parent->mnt_mounts.next;
2498 resume:
2499 	while (next != &this_parent->mnt_mounts) {
2500 		struct list_head *tmp = next;
2501 		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2502 
2503 		next = tmp->next;
2504 		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2505 			continue;
2506 		/*
2507 		 * Descend a level if the mnt_mounts list is non-empty.
2508 		 */
2509 		if (!list_empty(&mnt->mnt_mounts)) {
2510 			this_parent = mnt;
2511 			goto repeat;
2512 		}
2513 
2514 		if (!propagate_mount_busy(mnt, 1)) {
2515 			list_move_tail(&mnt->mnt_expire, graveyard);
2516 			found++;
2517 		}
2518 	}
2519 	/*
2520 	 * All done at this level ... ascend and resume the search
2521 	 */
2522 	if (this_parent != parent) {
2523 		next = this_parent->mnt_child.next;
2524 		this_parent = this_parent->mnt_parent;
2525 		goto resume;
2526 	}
2527 	return found;
2528 }
2529 
2530 /*
2531  * process a list of expirable mountpoints with the intent of discarding any
2532  * submounts of a specific parent mountpoint
2533  *
2534  * mount_lock must be held for write
2535  */
2536 static void shrink_submounts(struct mount *mnt)
2537 {
2538 	LIST_HEAD(graveyard);
2539 	struct mount *m;
2540 
2541 	/* extract submounts of 'mountpoint' from the expiration list */
2542 	while (select_submounts(mnt, &graveyard)) {
2543 		while (!list_empty(&graveyard)) {
2544 			m = list_first_entry(&graveyard, struct mount,
2545 						mnt_expire);
2546 			touch_mnt_namespace(m->mnt_ns);
2547 			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2548 		}
2549 	}
2550 }
2551 
2552 /*
2553  * Some copy_from_user() implementations do not return the exact number of
2554  * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
2555  * Note that this function differs from copy_from_user() in that it will oops
2556  * on bad values of `to', rather than returning a short copy.
2557  */
2558 static long exact_copy_from_user(void *to, const void __user * from,
2559 				 unsigned long n)
2560 {
2561 	char *t = to;
2562 	const char __user *f = from;
2563 	char c;
2564 
2565 	if (!access_ok(VERIFY_READ, from, n))
2566 		return n;
2567 
2568 	while (n) {
2569 		if (__get_user(c, f)) {
2570 			memset(t, 0, n);
2571 			break;
2572 		}
2573 		*t++ = c;
2574 		f++;
2575 		n--;
2576 	}
2577 	return n;
2578 }
2579 
2580 int copy_mount_options(const void __user * data, unsigned long *where)
2581 {
2582 	int i;
2583 	unsigned long page;
2584 	unsigned long size;
2585 
2586 	*where = 0;
2587 	if (!data)
2588 		return 0;
2589 
2590 	if (!(page = __get_free_page(GFP_KERNEL)))
2591 		return -ENOMEM;
2592 
2593 	/* We only care that *some* data at the address the user
2594 	 * gave us is valid.  Just in case, we'll zero
2595 	 * the remainder of the page.
2596 	 */
2597 	/* copy_from_user cannot cross TASK_SIZE ! */
2598 	size = TASK_SIZE - (unsigned long)data;
2599 	if (size > PAGE_SIZE)
2600 		size = PAGE_SIZE;
2601 
2602 	i = size - exact_copy_from_user((void *)page, data, size);
2603 	if (!i) {
2604 		free_page(page);
2605 		return -EFAULT;
2606 	}
2607 	if (i != PAGE_SIZE)
2608 		memset((char *)page + i, 0, PAGE_SIZE - i);
2609 	*where = page;
2610 	return 0;
2611 }
2612 
2613 char *copy_mount_string(const void __user *data)
2614 {
2615 	return data ? strndup_user(data, PAGE_SIZE) : NULL;
2616 }
2617 
2618 /*
2619  * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2620  * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2621  *
2622  * data is a (void *) that can point to any structure up to
2623  * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2624  * information (or be NULL).
2625  *
2626  * Pre-0.97 versions of mount() didn't have a flags word.
2627  * When the flags word was introduced its top half was required
2628  * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2629  * Therefore, if this magic number is present, it carries no information
2630  * and must be discarded.
2631  */
2632 long do_mount(const char *dev_name, const char __user *dir_name,
2633 		const char *type_page, unsigned long flags, void *data_page)
2634 {
2635 	struct path path;
2636 	int retval = 0;
2637 	int mnt_flags = 0;
2638 
2639 	/* Discard magic */
2640 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2641 		flags &= ~MS_MGC_MSK;
2642 
2643 	/* Basic sanity checks */
2644 	if (data_page)
2645 		((char *)data_page)[PAGE_SIZE - 1] = 0;
2646 
2647 	/* ... and get the mountpoint */
2648 	retval = user_path(dir_name, &path);
2649 	if (retval)
2650 		return retval;
2651 
2652 	retval = security_sb_mount(dev_name, &path,
2653 				   type_page, flags, data_page);
2654 	if (!retval && !may_mount())
2655 		retval = -EPERM;
2656 	if (retval)
2657 		goto dput_out;
2658 
2659 	/* Default to relatime unless overridden */
2660 	if (!(flags & MS_NOATIME))
2661 		mnt_flags |= MNT_RELATIME;
2662 
2663 	/* Separate the per-mountpoint flags */
2664 	if (flags & MS_NOSUID)
2665 		mnt_flags |= MNT_NOSUID;
2666 	if (flags & MS_NODEV)
2667 		mnt_flags |= MNT_NODEV;
2668 	if (flags & MS_NOEXEC)
2669 		mnt_flags |= MNT_NOEXEC;
2670 	if (flags & MS_NOATIME)
2671 		mnt_flags |= MNT_NOATIME;
2672 	if (flags & MS_NODIRATIME)
2673 		mnt_flags |= MNT_NODIRATIME;
2674 	if (flags & MS_STRICTATIME)
2675 		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2676 	if (flags & MS_RDONLY)
2677 		mnt_flags |= MNT_READONLY;
2678 
2679 	/* The default atime for remount is preservation */
2680 	if ((flags & MS_REMOUNT) &&
2681 	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2682 		       MS_STRICTATIME)) == 0)) {
2683 		mnt_flags &= ~MNT_ATIME_MASK;
2684 		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2685 	}
2686 
2687 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2688 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2689 		   MS_STRICTATIME);
2690 
2691 	if (flags & MS_REMOUNT)
2692 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2693 				    data_page);
2694 	else if (flags & MS_BIND)
2695 		retval = do_loopback(&path, dev_name, flags & MS_REC);
2696 	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2697 		retval = do_change_type(&path, flags);
2698 	else if (flags & MS_MOVE)
2699 		retval = do_move_mount(&path, dev_name);
2700 	else
2701 		retval = do_new_mount(&path, type_page, flags, mnt_flags,
2702 				      dev_name, data_page);
2703 dput_out:
2704 	path_put(&path);
2705 	return retval;
2706 }
2707 
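/*
 * The legacy magic handling above made concrete: a pre-2.4.0-test9 style
 * caller might issue (arguments hypothetical)
 *
 *	mount("/dev/sda1", "/mnt", "ext2", MS_MGC_VAL | MS_RDONLY, NULL);
 *
 * and do_mount() strips the MS_MGC_VAL bits, leaving plain MS_RDONLY
 * before the flags are interpreted.
 */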
2708 static void free_mnt_ns(struct mnt_namespace *ns)
2709 {
2710 	ns_free_inum(&ns->ns);
2711 	put_user_ns(ns->user_ns);
2712 	kfree(ns);
2713 }
2714 
2715 /*
2716  * Assign a sequence number so we can detect when we attempt to bind
2717  * mount a reference to an older mount namespace into the current
2718  * mount namespace, preventing reference counting loops.  A 64bit
2719  * number incremented once per nanosecond would take over 584 years
2720  * to wrap, which is effectively never, so we can ignore the possibility.
2721  */
2722 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2723 
2724 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2725 {
2726 	struct mnt_namespace *new_ns;
2727 	int ret;
2728 
2729 	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2730 	if (!new_ns)
2731 		return ERR_PTR(-ENOMEM);
2732 	ret = ns_alloc_inum(&new_ns->ns);
2733 	if (ret) {
2734 		kfree(new_ns);
2735 		return ERR_PTR(ret);
2736 	}
2737 	new_ns->ns.ops = &mntns_operations;
2738 	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2739 	atomic_set(&new_ns->count, 1);
2740 	new_ns->root = NULL;
2741 	INIT_LIST_HEAD(&new_ns->list);
2742 	init_waitqueue_head(&new_ns->poll);
2743 	new_ns->event = 0;
2744 	new_ns->user_ns = get_user_ns(user_ns);
2745 	return new_ns;
2746 }
2747 
2748 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2749 		struct user_namespace *user_ns, struct fs_struct *new_fs)
2750 {
2751 	struct mnt_namespace *new_ns;
2752 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2753 	struct mount *p, *q;
2754 	struct mount *old;
2755 	struct mount *new;
2756 	int copy_flags;
2757 
2758 	BUG_ON(!ns);
2759 
2760 	if (likely(!(flags & CLONE_NEWNS))) {
2761 		get_mnt_ns(ns);
2762 		return ns;
2763 	}
2764 
2765 	old = ns->root;
2766 
2767 	new_ns = alloc_mnt_ns(user_ns);
2768 	if (IS_ERR(new_ns))
2769 		return new_ns;
2770 
2771 	namespace_lock();
2772 	/* First pass: copy the tree topology */
2773 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2774 	if (user_ns != ns->user_ns)
2775 		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2776 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2777 	if (IS_ERR(new)) {
2778 		namespace_unlock();
2779 		free_mnt_ns(new_ns);
2780 		return ERR_CAST(new);
2781 	}
2782 	new_ns->root = new;
2783 	list_add_tail(&new_ns->list, &new->mnt_list);
2784 
2785 	/*
2786 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2787 	 * as belonging to new namespace.  We have already acquired a private
2788 	 * fs_struct, so tsk->fs->lock is not needed.
2789 	 */
2790 	p = old;
2791 	q = new;
2792 	while (p) {
2793 		q->mnt_ns = new_ns;
2794 		if (new_fs) {
2795 			if (&p->mnt == new_fs->root.mnt) {
2796 				new_fs->root.mnt = mntget(&q->mnt);
2797 				rootmnt = &p->mnt;
2798 			}
2799 			if (&p->mnt == new_fs->pwd.mnt) {
2800 				new_fs->pwd.mnt = mntget(&q->mnt);
2801 				pwdmnt = &p->mnt;
2802 			}
2803 		}
2804 		p = next_mnt(p, old);
2805 		q = next_mnt(q, new);
2806 		if (!q)
2807 			break;
2808 		while (p->mnt.mnt_root != q->mnt.mnt_root)
2809 			p = next_mnt(p, old);
2810 	}
2811 	namespace_unlock();
2812 
2813 	if (rootmnt)
2814 		mntput(rootmnt);
2815 	if (pwdmnt)
2816 		mntput(pwdmnt);
2817 
2818 	return new_ns;
2819 }
2820 
2821 /**
2822  * create_mnt_ns - creates a private namespace and adds a root filesystem
2823  * @mnt: pointer to the new root filesystem mountpoint
2824  */
2825 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2826 {
2827 	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2828 	if (!IS_ERR(new_ns)) {
2829 		struct mount *mnt = real_mount(m);
2830 		mnt->mnt_ns = new_ns;
2831 		new_ns->root = mnt;
2832 		list_add(&mnt->mnt_list, &new_ns->list);
2833 	} else {
2834 		mntput(m);
2835 	}
2836 	return new_ns;
2837 }
2838 
2839 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2840 {
2841 	struct mnt_namespace *ns;
2842 	struct super_block *s;
2843 	struct path path;
2844 	int err;
2845 
2846 	ns = create_mnt_ns(mnt);
2847 	if (IS_ERR(ns))
2848 		return ERR_CAST(ns);
2849 
2850 	err = vfs_path_lookup(mnt->mnt_root, mnt,
2851 			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2852 
2853 	put_mnt_ns(ns);
2854 
2855 	if (err)
2856 		return ERR_PTR(err);
2857 
2858 	/* trade a vfsmount reference for active sb one */
2859 	s = path.mnt->mnt_sb;
2860 	atomic_inc(&s->s_active);
2861 	mntput(path.mnt);
2862 	/* lock the sucker */
2863 	down_write(&s->s_umount);
2864 	/* ... and return the root of (sub)tree on it */
2865 	return path.dentry;
2866 }
2867 EXPORT_SYMBOL(mount_subtree);
2868 
2869 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2870 		char __user *, type, unsigned long, flags, void __user *, data)
2871 {
2872 	int ret;
2873 	char *kernel_type;
2874 	char *kernel_dev;
2875 	unsigned long data_page;
2876 
2877 	kernel_type = copy_mount_string(type);
2878 	ret = PTR_ERR(kernel_type);
2879 	if (IS_ERR(kernel_type))
2880 		goto out_type;
2881 
2882 	kernel_dev = copy_mount_string(dev_name);
2883 	ret = PTR_ERR(kernel_dev);
2884 	if (IS_ERR(kernel_dev))
2885 		goto out_dev;
2886 
2887 	ret = copy_mount_options(data, &data_page);
2888 	if (ret < 0)
2889 		goto out_data;
2890 
2891 	ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
2892 		(void *) data_page);
2893 
2894 	free_page(data_page);
2895 out_data:
2896 	kfree(kernel_dev);
2897 out_dev:
2898 	kfree(kernel_type);
2899 out_type:
2900 	return ret;
2901 }
2902 
2903 /*
2904  * Return true if path is reachable from root
2905  *
2906  * namespace_sem or mount_lock is held
2907  */
2908 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
2909 			 const struct path *root)
2910 {
2911 	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
2912 		dentry = mnt->mnt_mountpoint;
2913 		mnt = mnt->mnt_parent;
2914 	}
2915 	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
2916 }
2917 
2918 int path_is_under(struct path *path1, struct path *path2)
2919 {
2920 	int res;
2921 	read_seqlock_excl(&mount_lock);
2922 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
2923 	read_sequnlock_excl(&mount_lock);
2924 	return res;
2925 }
2926 EXPORT_SYMBOL(path_is_under);
2927 
2928 /*
2929  * pivot_root Semantics:
2930  * Moves the root file system of the current process to the directory put_old,
2931  * makes new_root the new root file system of the current process, and sets
2932  * root/cwd of all processes which had them on the current root to new_root.
2933  *
2934  * Restrictions:
2935  * The new_root and put_old must be directories, and must not be on the
2936  * same file system as the current process root. The put_old must be
2937  * underneath new_root, i.e. adding a non-zero number of /.. to the string
2938  * pointed to by put_old must yield the same directory as new_root. No other
2939  * file system may be mounted on put_old. After all, new_root is a mountpoint.
2940  *
2941  * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2942  * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2943  * in this situation.
2944  *
2945  * Notes:
2946  *  - we don't move root/cwd if they are not at the root (reason: if something
2947  *    cared enough to change them, it's probably wrong to force them elsewhere)
2948  *  - it's okay to pick a root that isn't the root of a file system, e.g.
2949  *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2950  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2951  *    first.
2952  */
2953 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2954 		const char __user *, put_old)
2955 {
2956 	struct path new, old, parent_path, root_parent, root;
2957 	struct mount *new_mnt, *root_mnt, *old_mnt;
2958 	struct mountpoint *old_mp, *root_mp;
2959 	int error;
2960 
2961 	if (!may_mount())
2962 		return -EPERM;
2963 
2964 	error = user_path_dir(new_root, &new);
2965 	if (error)
2966 		goto out0;
2967 
2968 	error = user_path_dir(put_old, &old);
2969 	if (error)
2970 		goto out1;
2971 
2972 	error = security_sb_pivotroot(&old, &new);
2973 	if (error)
2974 		goto out2;
2975 
2976 	get_fs_root(current->fs, &root);
2977 	old_mp = lock_mount(&old);
2978 	error = PTR_ERR(old_mp);
2979 	if (IS_ERR(old_mp))
2980 		goto out3;
2981 
2982 	error = -EINVAL;
2983 	new_mnt = real_mount(new.mnt);
2984 	root_mnt = real_mount(root.mnt);
2985 	old_mnt = real_mount(old.mnt);
2986 	if (IS_MNT_SHARED(old_mnt) ||
2987 		IS_MNT_SHARED(new_mnt->mnt_parent) ||
2988 		IS_MNT_SHARED(root_mnt->mnt_parent))
2989 		goto out4;
2990 	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
2991 		goto out4;
2992 	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
2993 		goto out4;
2994 	error = -ENOENT;
2995 	if (d_unlinked(new.dentry))
2996 		goto out4;
2997 	error = -EBUSY;
2998 	if (new_mnt == root_mnt || old_mnt == root_mnt)
2999 		goto out4; /* loop, on the same file system  */
3000 	error = -EINVAL;
3001 	if (root.mnt->mnt_root != root.dentry)
3002 		goto out4; /* not a mountpoint */
3003 	if (!mnt_has_parent(root_mnt))
3004 		goto out4; /* not attached */
3005 	root_mp = root_mnt->mnt_mp;
3006 	if (new.mnt->mnt_root != new.dentry)
3007 		goto out4; /* not a mountpoint */
3008 	if (!mnt_has_parent(new_mnt))
3009 		goto out4; /* not attached */
3010 	/* make sure we can reach put_old from new_root */
3011 	if (!is_path_reachable(old_mnt, old.dentry, &new))
3012 		goto out4;
3013 	/* make certain new is below the root */
3014 	if (!is_path_reachable(new_mnt, new.dentry, &root))
3015 		goto out4;
3016 	root_mp->m_count++; /* pin it so it won't go away */
3017 	lock_mount_hash();
3018 	detach_mnt(new_mnt, &parent_path);
3019 	detach_mnt(root_mnt, &root_parent);
3020 	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3021 		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3022 		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
3023 	}
3024 	/* mount old root on put_old */
3025 	attach_mnt(root_mnt, old_mnt, old_mp);
3026 	/* mount new_root on / */
3027 	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
3028 	touch_mnt_namespace(current->nsproxy->mnt_ns);
3029 	/* A moved mount should not expire automatically */
3030 	list_del_init(&new_mnt->mnt_expire);
3031 	unlock_mount_hash();
3032 	chroot_fs_refs(&root, &new);
3033 	put_mountpoint(root_mp);
3034 	error = 0;
3035 out4:
3036 	unlock_mount(old_mp);
3037 	if (!error) {
3038 		path_put(&root_parent);
3039 		path_put(&parent_path);
3040 	}
3041 out3:
3042 	path_put(&root);
3043 out2:
3044 	path_put(&old);
3045 out1:
3046 	path_put(&new);
3047 out0:
3048 	return error;
3049 }
3050 
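/*
 * A minimal userspace sketch of the sequence described above, as an
 * initramfs or container runtime might perform it ("/newroot" is a
 * hypothetical path; error handling elided).  The self-bind guarantees
 * new_root is a mountpoint, and put_old is a directory beneath it:
 *
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void switch_root(void)
 *	{
 *		mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *		mkdir("/newroot/old", 0700);
 *		syscall(SYS_pivot_root, "/newroot", "/newroot/old");
 *		chdir("/");
 *		umount2("/old", MNT_DETACH);
 *		rmdir("/old");
 *	}
 */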
3051 static void __init init_mount_tree(void)
3052 {
3053 	struct vfsmount *mnt;
3054 	struct mnt_namespace *ns;
3055 	struct path root;
3056 	struct file_system_type *type;
3057 
3058 	type = get_fs_type("rootfs");
3059 	if (!type)
3060 		panic("Can't find rootfs type");
3061 	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
3062 	put_filesystem(type);
3063 	if (IS_ERR(mnt))
3064 		panic("Can't create rootfs");
3065 
3066 	ns = create_mnt_ns(mnt);
3067 	if (IS_ERR(ns))
3068 		panic("Can't allocate initial namespace");
3069 
3070 	init_task.nsproxy->mnt_ns = ns;
3071 	get_mnt_ns(ns);
3072 
3073 	root.mnt = mnt;
3074 	root.dentry = mnt->mnt_root;
3075 	mnt->mnt_flags |= MNT_LOCKED;
3076 
3077 	set_fs_pwd(current->fs, &root);
3078 	set_fs_root(current->fs, &root);
3079 }
3080 
3081 void __init mnt_init(void)
3082 {
3083 	unsigned u;
3084 	int err;
3085 
3086 	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
3087 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3088 
3089 	mount_hashtable = alloc_large_system_hash("Mount-cache",
3090 				sizeof(struct hlist_head),
3091 				mhash_entries, 19,
3092 				0,
3093 				&m_hash_shift, &m_hash_mask, 0, 0);
3094 	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
3095 				sizeof(struct hlist_head),
3096 				mphash_entries, 19,
3097 				0,
3098 				&mp_hash_shift, &mp_hash_mask, 0, 0);
3099 
3100 	if (!mount_hashtable || !mountpoint_hashtable)
3101 		panic("Failed to allocate mount hash table\n");
3102 
3103 	for (u = 0; u <= m_hash_mask; u++)
3104 		INIT_HLIST_HEAD(&mount_hashtable[u]);
3105 	for (u = 0; u <= mp_hash_mask; u++)
3106 		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
3107 
3108 	kernfs_init();
3109 
3110 	err = sysfs_init();
3111 	if (err)
3112 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
3113 			__func__, err);
3114 	fs_kobj = kobject_create_and_add("fs", NULL);
3115 	if (!fs_kobj)
3116 		printk(KERN_WARNING "%s: kobj create error\n", __func__);
3117 	init_rootfs();
3118 	init_mount_tree();
3119 }
3120 
3121 void put_mnt_ns(struct mnt_namespace *ns)
3122 {
3123 	if (!atomic_dec_and_test(&ns->count))
3124 		return;
3125 	drop_collected_mounts(&ns->root->mnt);
3126 	free_mnt_ns(ns);
3127 }
3128 
3129 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
3130 {
3131 	struct vfsmount *mnt;
3132 	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
3133 	if (!IS_ERR(mnt)) {
3134 		/*
3135 		 * it is a longterm mount; don't release mnt until
3136 		 * it is unmounted, before the filesystem is unregistered
3137 		 */
3138 		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3139 	}
3140 	return mnt;
3141 }
3142 EXPORT_SYMBOL_GPL(kern_mount_data);
3143 
3144 void kern_unmount(struct vfsmount *mnt)
3145 {
3146 	/* release long term mount so mount point can be released */
3147 	if (!IS_ERR_OR_NULL(mnt)) {
3148 		real_mount(mnt)->mnt_ns = NULL;
3149 		synchronize_rcu();	/* yecchhh... */
3150 		mntput(mnt);
3151 	}
3152 }
3153 EXPORT_SYMBOL(kern_unmount);
3154 
3155 bool our_mnt(struct vfsmount *mnt)
3156 {
3157 	return check_mnt(real_mount(mnt));
3158 }
3159 
3160 bool current_chrooted(void)
3161 {
3162 	/* Does the current process have a non-standard root? */
3163 	struct path ns_root;
3164 	struct path fs_root;
3165 	bool chrooted;
3166 
3167 	/* Find the namespace root */
3168 	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3169 	ns_root.dentry = ns_root.mnt->mnt_root;
3170 	path_get(&ns_root);
3171 	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
3172 		;
3173 
3174 	get_fs_root(current->fs, &fs_root);
3175 
3176 	chrooted = !path_equal(&fs_root, &ns_root);
3177 
3178 	path_put(&fs_root);
3179 	path_put(&ns_root);
3180 
3181 	return chrooted;
3182 }
3183 
3184 static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3185 {
3186 	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
3187 	int new_flags = *new_mnt_flags;
3188 	struct mount *mnt;
3189 	bool visible = false;
3190 
3191 	if (unlikely(!ns))
3192 		return false;
3193 
3194 	down_read(&namespace_sem);
3195 	list_for_each_entry(mnt, &ns->list, mnt_list) {
3196 		struct mount *child;
3197 		if (mnt->mnt.mnt_sb->s_type != type)
3198 			continue;
3199 
3200 		/* This mount is not fully visible if its root directory
3201 		 * is not the root directory of the filesystem.
3202 		 */
3203 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3204 			continue;
3205 
3206 		/* Verify the mount flags are equal to or more permissive
3207 		 * than the proposed new mount.
3208 		 */
3209 		if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
3210 		    !(new_flags & MNT_READONLY))
3211 			continue;
3212 		if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
3213 		    !(new_flags & MNT_NODEV))
3214 			continue;
3215 		if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
3216 		    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
3217 			continue;
3218 
3219 		/* This mount is not fully visible if there are any
3220 		 * locked child mounts that cover anything except for
3221 		 * empty directories.
3222 		 */
3223 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3224 			struct inode *inode = child->mnt_mountpoint->d_inode;
3225 			/* Only worry about locked mounts */
3226 			if (!(child->mnt.mnt_flags & MNT_LOCKED))
3227 				continue;
3228 			/* Is the directory permanently empty? */
3229 			if (!is_empty_dir_inode(inode))
3230 				goto next;
3231 		}
3232 		/* Preserve the locked attributes */
3233 		*new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \
3234 							MNT_LOCK_NODEV    | \
3235 							MNT_LOCK_ATIME);
3236 		visible = true;
3237 		goto found;
3238 	next:	;
3239 	}
3240 found:
3241 	up_read(&namespace_sem);
3242 	return visible;
3243 }
3244 
3245 static struct ns_common *mntns_get(struct task_struct *task)
3246 {
3247 	struct ns_common *ns = NULL;
3248 	struct nsproxy *nsproxy;
3249 
3250 	task_lock(task);
3251 	nsproxy = task->nsproxy;
3252 	if (nsproxy) {
3253 		ns = &nsproxy->mnt_ns->ns;
3254 		get_mnt_ns(to_mnt_ns(ns));
3255 	}
3256 	task_unlock(task);
3257 
3258 	return ns;
3259 }
3260 
3261 static void mntns_put(struct ns_common *ns)
3262 {
3263 	put_mnt_ns(to_mnt_ns(ns));
3264 }
3265 
3266 static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3267 {
3268 	struct fs_struct *fs = current->fs;
3269 	struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
3270 	struct path root;
3271 
3272 	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
3273 	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
3274 	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
3275 		return -EPERM;
3276 
3277 	if (fs->users != 1)
3278 		return -EINVAL;
3279 
3280 	get_mnt_ns(mnt_ns);
3281 	put_mnt_ns(nsproxy->mnt_ns);
3282 	nsproxy->mnt_ns = mnt_ns;
3283 
3284 	/* Find the root */
3285 	root.mnt    = &mnt_ns->root->mnt;
3286 	root.dentry = mnt_ns->root->mnt.mnt_root;
3287 	path_get(&root);
3288 	while(d_mountpoint(root.dentry) && follow_down_one(&root))
3289 		;
3290 
3291 	/* Update the pwd and root */
3292 	set_fs_pwd(fs, &root);
3293 	set_fs_root(fs, &root);
3294 
3295 	path_put(&root);
3296 	return 0;
3297 }
3298 
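/*
 * Userspace view of the checks above (illustrative helper; the procfs
 * path is standard, the function name is not): setns(2) with CLONE_NEWNS
 * lands in mntns_install(), so the caller needs CAP_SYS_ADMIN over the
 * target namespace, CAP_SYS_CHROOT and CAP_SYS_ADMIN in its own user
 * namespace, and an unshared fs_struct (i.e. no CLONE_FS threads):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	int enter_mnt_ns(pid_t pid)
 *	{
 *		char path[64];
 *		int fd, ret;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/ns/mnt", pid);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		ret = setns(fd, CLONE_NEWNS);
 *		close(fd);
 *		return ret;
 *	}
 */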
3299 const struct proc_ns_operations mntns_operations = {
3300 	.name		= "mnt",
3301 	.type		= CLONE_NEWNS,
3302 	.get		= mntns_get,
3303 	.put		= mntns_put,
3304 	.install	= mntns_install,
3305 };
3306